]> git.ipfire.org Git - thirdparty/grsecurity-scrape.git/blame - test/grsecurity-3.1-4.0.4-201505222222.patch
Auto commit, 1 new patch{es}.
[thirdparty/grsecurity-scrape.git] / test / grsecurity-3.1-4.0.4-201505222222.patch
CommitLineData
57e29f20
PK
1diff --git a/Documentation/dontdiff b/Documentation/dontdiff
2index 9de9813..1462492 100644
3--- a/Documentation/dontdiff
4+++ b/Documentation/dontdiff
5@@ -3,9 +3,11 @@
6 *.bc
7 *.bin
8 *.bz2
9+*.c.[012]*.*
10 *.cis
11 *.cpio
12 *.csp
13+*.dbg
14 *.dsp
15 *.dvi
16 *.elf
17@@ -15,6 +17,7 @@
18 *.gcov
19 *.gen.S
20 *.gif
21+*.gmo
22 *.grep
23 *.grp
24 *.gz
25@@ -51,14 +54,17 @@
26 *.tab.h
27 *.tex
28 *.ver
29+*.vim
30 *.xml
31 *.xz
32 *_MODULES
33+*_reg_safe.h
34 *_vga16.c
35 *~
36 \#*#
37 *.9
38-.*
39+.[^g]*
40+.gen*
41 .*.d
42 .mm
43 53c700_d.h
44@@ -72,9 +78,11 @@ Image
45 Module.markers
46 Module.symvers
47 PENDING
48+PERF*
49 SCCS
50 System.map*
51 TAGS
52+TRACEEVENT-CFLAGS
53 aconf
54 af_names.h
55 aic7*reg.h*
56@@ -83,6 +91,7 @@ aic7*seq.h*
57 aicasm
58 aicdb.h*
59 altivec*.c
60+ashldi3.S
61 asm-offsets.h
62 asm_offsets.h
63 autoconf.h*
64@@ -95,32 +104,40 @@ bounds.h
65 bsetup
66 btfixupprep
67 build
68+builtin-policy.h
69 bvmlinux
70 bzImage*
71 capability_names.h
72 capflags.c
73 classlist.h*
74+clut_vga16.c
75+common-cmds.h
76 comp*.log
77 compile.h*
78 conf
79 config
80 config-*
81 config_data.h*
82+config.c
83 config.mak
84 config.mak.autogen
85+config.tmp
86 conmakehash
87 consolemap_deftbl.c*
88 cpustr.h
89 crc32table.h*
90 cscope.*
91 defkeymap.c
92+devicetable-offsets.h
93 devlist.h*
94 dnotify_test
95 docproc
96 dslm
97+dtc-lexer.lex.c
98 elf2ecoff
99 elfconfig.h*
100 evergreen_reg_safe.h
101+exception_policy.conf
102 fixdep
103 flask.h
104 fore200e_mkfirm
105@@ -128,12 +145,15 @@ fore200e_pca_fw.c*
106 gconf
107 gconf.glade.h
108 gen-devlist
109+gen-kdb_cmds.c
110 gen_crc32table
111 gen_init_cpio
112 generated
113 genheaders
114 genksyms
115 *_gray256.c
116+hash
117+hid-example
118 hpet_example
119 hugepage-mmap
120 hugepage-shm
121@@ -148,14 +168,14 @@ int32.c
122 int4.c
123 int8.c
124 kallsyms
125-kconfig
126+kern_constants.h
127 keywords.c
128 ksym.c*
129 ksym.h*
130 kxgettext
131 lex.c
132 lex.*.c
133-linux
134+lib1funcs.S
135 logo_*.c
136 logo_*_clut224.c
137 logo_*_mono.c
138@@ -165,14 +185,15 @@ mach-types.h
139 machtypes.h
140 map
141 map_hugetlb
142-media
143 mconf
144+mdp
145 miboot*
146 mk_elfconfig
147 mkboot
148 mkbugboot
149 mkcpustr
150 mkdep
151+mkpiggy
152 mkprep
153 mkregtable
154 mktables
155@@ -188,6 +209,8 @@ oui.c*
156 page-types
157 parse.c
158 parse.h
159+parse-events*
160+pasyms.h
161 patches*
162 pca200e.bin
163 pca200e_ecd.bin2
164@@ -197,6 +220,7 @@ perf-archive
165 piggyback
166 piggy.gzip
167 piggy.S
168+pmu-*
169 pnmtologo
170 ppc_defs.h*
171 pss_boot.h
172@@ -206,7 +230,12 @@ r200_reg_safe.h
173 r300_reg_safe.h
174 r420_reg_safe.h
175 r600_reg_safe.h
176+randomize_layout_hash.h
177+randomize_layout_seed.h
178+realmode.lds
179+realmode.relocs
180 recordmcount
181+regdb.c
182 relocs
183 rlim_names.h
184 rn50_reg_safe.h
185@@ -216,8 +245,12 @@ series
186 setup
187 setup.bin
188 setup.elf
189+signing_key*
190+size_overflow_hash.h
191 sImage
192+slabinfo
193 sm_tbl*
194+sortextable
195 split-include
196 syscalltab.h
197 tables.c
198@@ -227,6 +260,7 @@ tftpboot.img
199 timeconst.h
200 times.h*
201 trix_boot.h
202+user_constants.h
203 utsrelease.h*
204 vdso-syms.lds
205 vdso.lds
206@@ -238,13 +272,17 @@ vdso32.lds
207 vdso32.so.dbg
208 vdso64.lds
209 vdso64.so.dbg
210+vdsox32.lds
211+vdsox32-syms.lds
212 version.h*
213 vmImage
214 vmlinux
215 vmlinux-*
216 vmlinux.aout
217 vmlinux.bin.all
218+vmlinux.bin.bz2
219 vmlinux.lds
220+vmlinux.relocs
221 vmlinuz
222 voffset.h
223 vsyscall.lds
224@@ -252,9 +290,12 @@ vsyscall_32.lds
225 wanxlfw.inc
226 uImage
227 unifdef
228+utsrelease.h
229 wakeup.bin
230 wakeup.elf
231 wakeup.lds
232+x509*
233 zImage*
234 zconf.hash.c
235+zconf.lex.c
236 zoffset.h
237diff --git a/Documentation/kbuild/makefiles.txt b/Documentation/kbuild/makefiles.txt
238index 74b6c6d..eac0e77 100644
239--- a/Documentation/kbuild/makefiles.txt
240+++ b/Documentation/kbuild/makefiles.txt
241@@ -23,10 +23,11 @@ This document describes the Linux kernel Makefiles.
242 === 4 Host Program support
243 --- 4.1 Simple Host Program
244 --- 4.2 Composite Host Programs
245- --- 4.3 Using C++ for host programs
246- --- 4.4 Controlling compiler options for host programs
247- --- 4.5 When host programs are actually built
248- --- 4.6 Using hostprogs-$(CONFIG_FOO)
249+ --- 4.3 Defining shared libraries
250+ --- 4.4 Using C++ for host programs
251+ --- 4.5 Controlling compiler options for host programs
252+ --- 4.6 When host programs are actually built
253+ --- 4.7 Using hostprogs-$(CONFIG_FOO)
254
255 === 5 Kbuild clean infrastructure
256
257@@ -643,7 +644,29 @@ Both possibilities are described in the following.
258 Finally, the two .o files are linked to the executable, lxdialog.
259 Note: The syntax <executable>-y is not permitted for host-programs.
260
261---- 4.3 Using C++ for host programs
262+--- 4.3 Defining shared libraries
263+
264+ Objects with extension .so are considered shared libraries, and
265+ will be compiled as position independent objects.
266+ Kbuild provides support for shared libraries, but the usage
267+ shall be restricted.
268+ In the following example the libkconfig.so shared library is used
269+ to link the executable conf.
270+
271+ Example:
272+ #scripts/kconfig/Makefile
273+ hostprogs-y := conf
274+ conf-objs := conf.o libkconfig.so
275+ libkconfig-objs := expr.o type.o
276+
277+ Shared libraries always require a corresponding -objs line, and
278+ in the example above the shared library libkconfig is composed by
279+ the two objects expr.o and type.o.
280+ expr.o and type.o will be built as position independent code and
281+ linked as a shared library libkconfig.so. C++ is not supported for
282+ shared libraries.
283+
284+--- 4.4 Using C++ for host programs
285
286 kbuild offers support for host programs written in C++. This was
287 introduced solely to support kconfig, and is not recommended
288@@ -666,7 +689,7 @@ Both possibilities are described in the following.
289 qconf-cxxobjs := qconf.o
290 qconf-objs := check.o
291
292---- 4.4 Controlling compiler options for host programs
293+--- 4.5 Controlling compiler options for host programs
294
295 When compiling host programs, it is possible to set specific flags.
296 The programs will always be compiled utilising $(HOSTCC) passed
297@@ -694,7 +717,7 @@ Both possibilities are described in the following.
298 When linking qconf, it will be passed the extra option
299 "-L$(QTDIR)/lib".
300
301---- 4.5 When host programs are actually built
302+--- 4.6 When host programs are actually built
303
304 Kbuild will only build host-programs when they are referenced
305 as a prerequisite.
306@@ -725,7 +748,7 @@ Both possibilities are described in the following.
307 This will tell kbuild to build lxdialog even if not referenced in
308 any rule.
309
310---- 4.6 Using hostprogs-$(CONFIG_FOO)
311+--- 4.7 Using hostprogs-$(CONFIG_FOO)
312
313 A typical pattern in a Kbuild file looks like this:
314
315diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
316index 4d68ec8..9546b75 100644
317--- a/Documentation/kernel-parameters.txt
318+++ b/Documentation/kernel-parameters.txt
319@@ -1203,6 +1203,13 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
320 Format: <unsigned int> such that (rxsize & ~0x1fffc0) == 0.
321 Default: 1024
322
323+ grsec_proc_gid= [GRKERNSEC_PROC_USERGROUP] Chooses GID to
324+ ignore grsecurity's /proc restrictions
325+
326+ grsec_sysfs_restrict= Format: 0 | 1
327+ Default: 1
328+ Disables GRKERNSEC_SYSFS_RESTRICT if enabled in config
329+
330 hashdist= [KNL,NUMA] Large hashes allocated during boot
331 are distributed across NUMA nodes. Defaults on
332 for 64-bit NUMA, off otherwise.
333@@ -2300,6 +2307,10 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
334 noexec=on: enable non-executable mappings (default)
335 noexec=off: disable non-executable mappings
336
337+ nopcid [X86-64]
338+ Disable PCID (Process-Context IDentifier) even if it
339+ is supported by the processor.
340+
341 nosmap [X86]
342 Disable SMAP (Supervisor Mode Access Prevention)
343 even if it is supported by processor.
344@@ -2601,6 +2612,30 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
345 the specified number of seconds. This is to be used if
346 your oopses keep scrolling off the screen.
347
348+ pax_nouderef [X86] disables UDEREF. Most likely needed under certain
349+ virtualization environments that don't cope well with the
350+ expand down segment used by UDEREF on X86-32 or the frequent
351+ page table updates on X86-64.
352+
353+ pax_sanitize_slab=
354+ Format: { 0 | 1 | off | fast | full }
355+ Options '0' and '1' are only provided for backward
356+ compatibility, 'off' or 'fast' should be used instead.
357+ 0|off : disable slab object sanitization
358+ 1|fast: enable slab object sanitization excluding
359+ whitelisted slabs (default)
360+ full : sanitize all slabs, even the whitelisted ones
361+
362+ pax_softmode= 0/1 to disable/enable PaX softmode on boot already.
363+
364+ pax_extra_latent_entropy
365+ Enable a very simple form of latent entropy extraction
366+ from the first 4GB of memory as the bootmem allocator
367+ passes the memory pages to the buddy allocator.
368+
369+ pax_weakuderef [X86-64] enables the weaker but faster form of UDEREF
370+ when the processor supports PCID.
371+
372 pcbit= [HW,ISDN]
373
374 pcd. [PARIDE]
375diff --git a/Makefile b/Makefile
376index 3d16bcc..a3b342e 100644
377--- a/Makefile
378+++ b/Makefile
379@@ -298,7 +298,9 @@ CONFIG_SHELL := $(shell if [ -x "$$BASH" ]; then echo $$BASH; \
380 HOSTCC = gcc
381 HOSTCXX = g++
382 HOSTCFLAGS = -Wall -Wmissing-prototypes -Wstrict-prototypes -O2 -fomit-frame-pointer -std=gnu89
383-HOSTCXXFLAGS = -O2
384+HOSTCFLAGS = -W -Wno-unused-parameter -Wno-missing-field-initializers -fno-delete-null-pointer-checks
385+HOSTCFLAGS += $(call cc-option, -Wno-empty-body)
386+HOSTCXXFLAGS = -O2 -Wall -W -Wno-array-bounds
387
388 ifeq ($(shell $(HOSTCC) -v 2>&1 | grep -c "clang version"), 1)
389 HOSTCFLAGS += -Wno-unused-value -Wno-unused-parameter \
390@@ -446,8 +448,8 @@ export RCS_TAR_IGNORE := --exclude SCCS --exclude BitKeeper --exclude .svn \
391 # Rules shared between *config targets and build targets
392
393 # Basic helpers built in scripts/
394-PHONY += scripts_basic
395-scripts_basic:
396+PHONY += scripts_basic gcc-plugins
397+scripts_basic: gcc-plugins
398 $(Q)$(MAKE) $(build)=scripts/basic
399 $(Q)rm -f .tmp_quiet_recordmcount
400
401@@ -622,6 +624,72 @@ endif
402 # Tell gcc to never replace conditional load with a non-conditional one
403 KBUILD_CFLAGS += $(call cc-option,--param=allow-store-data-races=0)
404
405+ifndef DISABLE_PAX_PLUGINS
406+ifeq ($(call cc-ifversion, -ge, 0408, y), y)
407+PLUGINCC := $(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-plugin.sh "$(HOSTCXX)" "$(HOSTCXX)" "$(CC)")
408+else
409+PLUGINCC := $(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-plugin.sh "$(HOSTCC)" "$(HOSTCXX)" "$(CC)")
410+endif
411+ifneq ($(PLUGINCC),)
412+ifdef CONFIG_PAX_CONSTIFY_PLUGIN
413+CONSTIFY_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/constify_plugin.so -DCONSTIFY_PLUGIN
414+endif
415+ifdef CONFIG_PAX_MEMORY_STACKLEAK
416+STACKLEAK_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/stackleak_plugin.so -DSTACKLEAK_PLUGIN
417+STACKLEAK_PLUGIN_CFLAGS += -fplugin-arg-stackleak_plugin-track-lowest-sp=100
418+endif
419+ifdef CONFIG_KALLOCSTAT_PLUGIN
420+KALLOCSTAT_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/kallocstat_plugin.so
421+endif
422+ifdef CONFIG_PAX_KERNEXEC_PLUGIN
423+KERNEXEC_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/kernexec_plugin.so
424+KERNEXEC_PLUGIN_CFLAGS += -fplugin-arg-kernexec_plugin-method=$(CONFIG_PAX_KERNEXEC_PLUGIN_METHOD) -DKERNEXEC_PLUGIN
425+KERNEXEC_PLUGIN_AFLAGS := -DKERNEXEC_PLUGIN
426+endif
427+ifdef CONFIG_GRKERNSEC_RANDSTRUCT
428+RANDSTRUCT_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/randomize_layout_plugin.so -DRANDSTRUCT_PLUGIN
429+ifdef CONFIG_GRKERNSEC_RANDSTRUCT_PERFORMANCE
430+RANDSTRUCT_PLUGIN_CFLAGS += -fplugin-arg-randomize_layout_plugin-performance-mode
431+endif
432+endif
433+ifdef CONFIG_CHECKER_PLUGIN
434+ifeq ($(call cc-ifversion, -ge, 0406, y), y)
435+CHECKER_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/checker_plugin.so -DCHECKER_PLUGIN
436+endif
437+endif
438+COLORIZE_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/colorize_plugin.so
439+ifdef CONFIG_PAX_SIZE_OVERFLOW
440+SIZE_OVERFLOW_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/size_overflow_plugin/size_overflow_plugin.so -DSIZE_OVERFLOW_PLUGIN
441+endif
442+ifdef CONFIG_PAX_LATENT_ENTROPY
443+LATENT_ENTROPY_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/latent_entropy_plugin.so -DLATENT_ENTROPY_PLUGIN
444+endif
445+ifdef CONFIG_PAX_MEMORY_STRUCTLEAK
446+STRUCTLEAK_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/structleak_plugin.so -DSTRUCTLEAK_PLUGIN
447+endif
448+GCC_PLUGINS_CFLAGS := $(CONSTIFY_PLUGIN_CFLAGS) $(STACKLEAK_PLUGIN_CFLAGS) $(KALLOCSTAT_PLUGIN_CFLAGS)
449+GCC_PLUGINS_CFLAGS += $(KERNEXEC_PLUGIN_CFLAGS) $(CHECKER_PLUGIN_CFLAGS) $(COLORIZE_PLUGIN_CFLAGS)
450+GCC_PLUGINS_CFLAGS += $(SIZE_OVERFLOW_PLUGIN_CFLAGS) $(LATENT_ENTROPY_PLUGIN_CFLAGS) $(STRUCTLEAK_PLUGIN_CFLAGS)
451+GCC_PLUGINS_CFLAGS += $(RANDSTRUCT_PLUGIN_CFLAGS)
452+GCC_PLUGINS_AFLAGS := $(KERNEXEC_PLUGIN_AFLAGS)
453+export PLUGINCC GCC_PLUGINS_CFLAGS GCC_PLUGINS_AFLAGS CONSTIFY_PLUGIN LATENT_ENTROPY_PLUGIN_CFLAGS
454+ifeq ($(KBUILD_EXTMOD),)
455+gcc-plugins:
456+ $(Q)$(MAKE) $(build)=tools/gcc
457+else
458+gcc-plugins: ;
459+endif
460+else
461+gcc-plugins:
462+ifeq ($(call cc-ifversion, -ge, 0405, y), y)
463+ $(error Your gcc installation does not support plugins. If the necessary headers for plugin support are missing, they should be installed. On Debian, apt-get install gcc-<ver>-plugin-dev. If you choose to ignore this error and lessen the improvements provided by this patch, re-run make with the DISABLE_PAX_PLUGINS=y argument.))
464+else
465+ $(Q)echo "warning, your gcc version does not support plugins, you should upgrade it to gcc 4.5 at least"
466+endif
467+ $(Q)echo "PAX_MEMORY_STACKLEAK, constification, PAX_LATENT_ENTROPY and other features will be less secure. PAX_SIZE_OVERFLOW will not be active."
468+endif
469+endif
470+
471 ifdef CONFIG_READABLE_ASM
472 # Disable optimizations that make assembler listings hard to read.
473 # reorder blocks reorders the control in the function
474@@ -714,7 +782,7 @@ KBUILD_CFLAGS += $(call cc-option, -gsplit-dwarf, -g)
475 else
476 KBUILD_CFLAGS += -g
477 endif
478-KBUILD_AFLAGS += -Wa,-gdwarf-2
479+KBUILD_AFLAGS += -Wa,--gdwarf-2
480 endif
481 ifdef CONFIG_DEBUG_INFO_DWARF4
482 KBUILD_CFLAGS += $(call cc-option, -gdwarf-4,)
483@@ -884,7 +952,7 @@ export mod_sign_cmd
484
485
486 ifeq ($(KBUILD_EXTMOD),)
487-core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/
488+core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/ grsecurity/
489
490 vmlinux-dirs := $(patsubst %/,%,$(filter %/, $(init-y) $(init-m) \
491 $(core-y) $(core-m) $(drivers-y) $(drivers-m) \
492@@ -934,6 +1002,8 @@ endif
493
494 # The actual objects are generated when descending,
495 # make sure no implicit rule kicks in
496+$(filter-out $(init-y),$(vmlinux-deps)): KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
497+$(filter-out $(init-y),$(vmlinux-deps)): KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
498 $(sort $(vmlinux-deps)): $(vmlinux-dirs) ;
499
500 # Handle descending into subdirectories listed in $(vmlinux-dirs)
501@@ -943,7 +1013,7 @@ $(sort $(vmlinux-deps)): $(vmlinux-dirs) ;
502 # Error messages still appears in the original language
503
504 PHONY += $(vmlinux-dirs)
505-$(vmlinux-dirs): prepare scripts
506+$(vmlinux-dirs): gcc-plugins prepare scripts
507 $(Q)$(MAKE) $(build)=$@
508
509 define filechk_kernel.release
510@@ -986,10 +1056,13 @@ prepare1: prepare2 $(version_h) include/generated/utsrelease.h \
511
512 archprepare: archheaders archscripts prepare1 scripts_basic
513
514+prepare0: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
515+prepare0: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
516 prepare0: archprepare FORCE
517 $(Q)$(MAKE) $(build)=.
518
519 # All the preparing..
520+prepare: KBUILD_CFLAGS := $(filter-out $(GCC_PLUGINS_CFLAGS),$(KBUILD_CFLAGS))
521 prepare: prepare0
522
523 # Generate some files
524@@ -1103,6 +1176,8 @@ all: modules
525 # using awk while concatenating to the final file.
526
527 PHONY += modules
528+modules: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
529+modules: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
530 modules: $(vmlinux-dirs) $(if $(KBUILD_BUILTIN),vmlinux) modules.builtin
531 $(Q)$(AWK) '!x[$$0]++' $(vmlinux-dirs:%=$(objtree)/%/modules.order) > $(objtree)/modules.order
532 @$(kecho) ' Building modules, stage 2.';
533@@ -1118,7 +1193,7 @@ modules.builtin: $(vmlinux-dirs:%=%/modules.builtin)
534
535 # Target to prepare building external modules
536 PHONY += modules_prepare
537-modules_prepare: prepare scripts
538+modules_prepare: gcc-plugins prepare scripts
539
540 # Target to install modules
541 PHONY += modules_install
542@@ -1184,7 +1259,10 @@ MRPROPER_FILES += .config .config.old .version .old_version \
543 Module.symvers tags TAGS cscope* GPATH GTAGS GRTAGS GSYMS \
544 signing_key.priv signing_key.x509 x509.genkey \
545 extra_certificates signing_key.x509.keyid \
546- signing_key.x509.signer vmlinux-gdb.py
547+ signing_key.x509.signer vmlinux-gdb.py \
548+ tools/gcc/size_overflow_plugin/size_overflow_hash_aux.h \
549+ tools/gcc/size_overflow_plugin/size_overflow_hash.h \
550+ tools/gcc/randomize_layout_seed.h
551
552 # clean - Delete most, but leave enough to build external modules
553 #
554@@ -1223,7 +1301,7 @@ distclean: mrproper
555 @find $(srctree) $(RCS_FIND_IGNORE) \
556 \( -name '*.orig' -o -name '*.rej' -o -name '*~' \
557 -o -name '*.bak' -o -name '#*#' -o -name '.*.orig' \
558- -o -name '.*.rej' -o -name '*%' -o -name 'core' \) \
559+ -o -name '.*.rej' -o -name '*.so' -o -name '*%' -o -name 'core' \) \
560 -type f -print | xargs rm -f
561
562
563@@ -1389,6 +1467,8 @@ PHONY += $(module-dirs) modules
564 $(module-dirs): crmodverdir $(objtree)/Module.symvers
565 $(Q)$(MAKE) $(build)=$(patsubst _module_%,%,$@)
566
567+modules: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
568+modules: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
569 modules: $(module-dirs)
570 @$(kecho) ' Building modules, stage 2.';
571 $(Q)$(MAKE) -f $(srctree)/scripts/Makefile.modpost
572@@ -1529,17 +1609,21 @@ else
573 target-dir = $(if $(KBUILD_EXTMOD),$(dir $<),$(dir $@))
574 endif
575
576-%.s: %.c prepare scripts FORCE
577+%.s: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
578+%.s: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
579+%.s: %.c gcc-plugins prepare scripts FORCE
580 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
581 %.i: %.c prepare scripts FORCE
582 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
583-%.o: %.c prepare scripts FORCE
584+%.o: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
585+%.o: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
586+%.o: %.c gcc-plugins prepare scripts FORCE
587 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
588 %.lst: %.c prepare scripts FORCE
589 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
590-%.s: %.S prepare scripts FORCE
591+%.s: %.S gcc-plugins prepare scripts FORCE
592 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
593-%.o: %.S prepare scripts FORCE
594+%.o: %.S gcc-plugins prepare scripts FORCE
595 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
596 %.symtypes: %.c prepare scripts FORCE
597 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
598@@ -1551,11 +1635,15 @@ endif
599 $(build)=$(build-dir)
600 # Make sure the latest headers are built for Documentation
601 Documentation/: headers_install
602-%/: prepare scripts FORCE
603+%/: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
604+%/: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
605+%/: gcc-plugins prepare scripts FORCE
606 $(cmd_crmodverdir)
607 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
608 $(build)=$(build-dir)
609-%.ko: prepare scripts FORCE
610+%.ko: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
611+%.ko: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
612+%.ko: gcc-plugins prepare scripts FORCE
613 $(cmd_crmodverdir)
614 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
615 $(build)=$(build-dir) $(@:.ko=.o)
616diff --git a/arch/alpha/include/asm/atomic.h b/arch/alpha/include/asm/atomic.h
617index 8f8eafb..3405f46 100644
618--- a/arch/alpha/include/asm/atomic.h
619+++ b/arch/alpha/include/asm/atomic.h
620@@ -239,4 +239,14 @@ static inline long atomic64_dec_if_positive(atomic64_t *v)
621 #define atomic_dec(v) atomic_sub(1,(v))
622 #define atomic64_dec(v) atomic64_sub(1,(v))
623
624+#define atomic64_read_unchecked(v) atomic64_read(v)
625+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
626+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
627+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
628+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
629+#define atomic64_inc_unchecked(v) atomic64_inc(v)
630+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
631+#define atomic64_dec_unchecked(v) atomic64_dec(v)
632+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
633+
634 #endif /* _ALPHA_ATOMIC_H */
635diff --git a/arch/alpha/include/asm/cache.h b/arch/alpha/include/asm/cache.h
636index ad368a9..fbe0f25 100644
637--- a/arch/alpha/include/asm/cache.h
638+++ b/arch/alpha/include/asm/cache.h
639@@ -4,19 +4,19 @@
640 #ifndef __ARCH_ALPHA_CACHE_H
641 #define __ARCH_ALPHA_CACHE_H
642
643+#include <linux/const.h>
644
645 /* Bytes per L1 (data) cache line. */
646 #if defined(CONFIG_ALPHA_GENERIC) || defined(CONFIG_ALPHA_EV6)
647-# define L1_CACHE_BYTES 64
648 # define L1_CACHE_SHIFT 6
649 #else
650 /* Both EV4 and EV5 are write-through, read-allocate,
651 direct-mapped, physical.
652 */
653-# define L1_CACHE_BYTES 32
654 # define L1_CACHE_SHIFT 5
655 #endif
656
657+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
658 #define SMP_CACHE_BYTES L1_CACHE_BYTES
659
660 #endif
661diff --git a/arch/alpha/include/asm/elf.h b/arch/alpha/include/asm/elf.h
662index 968d999..d36b2df 100644
663--- a/arch/alpha/include/asm/elf.h
664+++ b/arch/alpha/include/asm/elf.h
665@@ -91,6 +91,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
666
667 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x1000000)
668
669+#ifdef CONFIG_PAX_ASLR
670+#define PAX_ELF_ET_DYN_BASE (current->personality & ADDR_LIMIT_32BIT ? 0x10000 : 0x120000000UL)
671+
672+#define PAX_DELTA_MMAP_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 28)
673+#define PAX_DELTA_STACK_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 19)
674+#endif
675+
676 /* $0 is set by ld.so to a pointer to a function which might be
677 registered using atexit. This provides a mean for the dynamic
678 linker to call DT_FINI functions for shared libraries that have
679diff --git a/arch/alpha/include/asm/pgalloc.h b/arch/alpha/include/asm/pgalloc.h
680index aab14a0..b4fa3e7 100644
681--- a/arch/alpha/include/asm/pgalloc.h
682+++ b/arch/alpha/include/asm/pgalloc.h
683@@ -29,6 +29,12 @@ pgd_populate(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
684 pgd_set(pgd, pmd);
685 }
686
687+static inline void
688+pgd_populate_kernel(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
689+{
690+ pgd_populate(mm, pgd, pmd);
691+}
692+
693 extern pgd_t *pgd_alloc(struct mm_struct *mm);
694
695 static inline void
696diff --git a/arch/alpha/include/asm/pgtable.h b/arch/alpha/include/asm/pgtable.h
697index a9a1195..e9b8417 100644
698--- a/arch/alpha/include/asm/pgtable.h
699+++ b/arch/alpha/include/asm/pgtable.h
700@@ -101,6 +101,17 @@ struct vm_area_struct;
701 #define PAGE_SHARED __pgprot(_PAGE_VALID | __ACCESS_BITS)
702 #define PAGE_COPY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
703 #define PAGE_READONLY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
704+
705+#ifdef CONFIG_PAX_PAGEEXEC
706+# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOE)
707+# define PAGE_COPY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
708+# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
709+#else
710+# define PAGE_SHARED_NOEXEC PAGE_SHARED
711+# define PAGE_COPY_NOEXEC PAGE_COPY
712+# define PAGE_READONLY_NOEXEC PAGE_READONLY
713+#endif
714+
715 #define PAGE_KERNEL __pgprot(_PAGE_VALID | _PAGE_ASM | _PAGE_KRE | _PAGE_KWE)
716
717 #define _PAGE_NORMAL(x) __pgprot(_PAGE_VALID | __ACCESS_BITS | (x))
718diff --git a/arch/alpha/kernel/module.c b/arch/alpha/kernel/module.c
719index 2fd00b7..cfd5069 100644
720--- a/arch/alpha/kernel/module.c
721+++ b/arch/alpha/kernel/module.c
722@@ -160,7 +160,7 @@ apply_relocate_add(Elf64_Shdr *sechdrs, const char *strtab,
723
724 /* The small sections were sorted to the end of the segment.
725 The following should definitely cover them. */
726- gp = (u64)me->module_core + me->core_size - 0x8000;
727+ gp = (u64)me->module_core_rw + me->core_size_rw - 0x8000;
728 got = sechdrs[me->arch.gotsecindex].sh_addr;
729
730 for (i = 0; i < n; i++) {
731diff --git a/arch/alpha/kernel/osf_sys.c b/arch/alpha/kernel/osf_sys.c
732index e51f578..16c64a3 100644
733--- a/arch/alpha/kernel/osf_sys.c
734+++ b/arch/alpha/kernel/osf_sys.c
735@@ -1296,10 +1296,11 @@ SYSCALL_DEFINE1(old_adjtimex, struct timex32 __user *, txc_p)
736 generic version except that we know how to honor ADDR_LIMIT_32BIT. */
737
738 static unsigned long
739-arch_get_unmapped_area_1(unsigned long addr, unsigned long len,
740- unsigned long limit)
741+arch_get_unmapped_area_1(struct file *filp, unsigned long addr, unsigned long len,
742+ unsigned long limit, unsigned long flags)
743 {
744 struct vm_unmapped_area_info info;
745+ unsigned long offset = gr_rand_threadstack_offset(current->mm, filp, flags);
746
747 info.flags = 0;
748 info.length = len;
749@@ -1307,6 +1308,7 @@ arch_get_unmapped_area_1(unsigned long addr, unsigned long len,
750 info.high_limit = limit;
751 info.align_mask = 0;
752 info.align_offset = 0;
753+ info.threadstack_offset = offset;
754 return vm_unmapped_area(&info);
755 }
756
757@@ -1339,20 +1341,24 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
758 merely specific addresses, but regions of memory -- perhaps
759 this feature should be incorporated into all ports? */
760
761+#ifdef CONFIG_PAX_RANDMMAP
762+ if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
763+#endif
764+
765 if (addr) {
766- addr = arch_get_unmapped_area_1 (PAGE_ALIGN(addr), len, limit);
767+ addr = arch_get_unmapped_area_1 (filp, PAGE_ALIGN(addr), len, limit, flags);
768 if (addr != (unsigned long) -ENOMEM)
769 return addr;
770 }
771
772 /* Next, try allocating at TASK_UNMAPPED_BASE. */
773- addr = arch_get_unmapped_area_1 (PAGE_ALIGN(TASK_UNMAPPED_BASE),
774- len, limit);
775+ addr = arch_get_unmapped_area_1 (filp, PAGE_ALIGN(current->mm->mmap_base), len, limit, flags);
776+
777 if (addr != (unsigned long) -ENOMEM)
778 return addr;
779
780 /* Finally, try allocating in low memory. */
781- addr = arch_get_unmapped_area_1 (PAGE_SIZE, len, limit);
782+ addr = arch_get_unmapped_area_1 (filp, PAGE_SIZE, len, limit, flags);
783
784 return addr;
785 }
786diff --git a/arch/alpha/mm/fault.c b/arch/alpha/mm/fault.c
787index 9d0ac09..479a962 100644
788--- a/arch/alpha/mm/fault.c
789+++ b/arch/alpha/mm/fault.c
790@@ -53,6 +53,124 @@ __load_new_mm_context(struct mm_struct *next_mm)
791 __reload_thread(pcb);
792 }
793
794+#ifdef CONFIG_PAX_PAGEEXEC
795+/*
796+ * PaX: decide what to do with offenders (regs->pc = fault address)
797+ *
798+ * returns 1 when task should be killed
799+ * 2 when patched PLT trampoline was detected
800+ * 3 when unpatched PLT trampoline was detected
801+ */
802+static int pax_handle_fetch_fault(struct pt_regs *regs)
803+{
804+
805+#ifdef CONFIG_PAX_EMUPLT
806+ int err;
807+
808+ do { /* PaX: patched PLT emulation #1 */
809+ unsigned int ldah, ldq, jmp;
810+
811+ err = get_user(ldah, (unsigned int *)regs->pc);
812+ err |= get_user(ldq, (unsigned int *)(regs->pc+4));
813+ err |= get_user(jmp, (unsigned int *)(regs->pc+8));
814+
815+ if (err)
816+ break;
817+
818+ if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
819+ (ldq & 0xFFFF0000U) == 0xA77B0000U &&
820+ jmp == 0x6BFB0000U)
821+ {
822+ unsigned long r27, addr;
823+ unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
824+ unsigned long addrl = ldq | 0xFFFFFFFFFFFF0000UL;
825+
826+ addr = regs->r27 + ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
827+ err = get_user(r27, (unsigned long *)addr);
828+ if (err)
829+ break;
830+
831+ regs->r27 = r27;
832+ regs->pc = r27;
833+ return 2;
834+ }
835+ } while (0);
836+
837+ do { /* PaX: patched PLT emulation #2 */
838+ unsigned int ldah, lda, br;
839+
840+ err = get_user(ldah, (unsigned int *)regs->pc);
841+ err |= get_user(lda, (unsigned int *)(regs->pc+4));
842+ err |= get_user(br, (unsigned int *)(regs->pc+8));
843+
844+ if (err)
845+ break;
846+
847+ if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
848+ (lda & 0xFFFF0000U) == 0xA77B0000U &&
849+ (br & 0xFFE00000U) == 0xC3E00000U)
850+ {
851+ unsigned long addr = br | 0xFFFFFFFFFFE00000UL;
852+ unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
853+ unsigned long addrl = lda | 0xFFFFFFFFFFFF0000UL;
854+
855+ regs->r27 += ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
856+ regs->pc += 12 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
857+ return 2;
858+ }
859+ } while (0);
860+
861+ do { /* PaX: unpatched PLT emulation */
862+ unsigned int br;
863+
864+ err = get_user(br, (unsigned int *)regs->pc);
865+
866+ if (!err && (br & 0xFFE00000U) == 0xC3800000U) {
867+ unsigned int br2, ldq, nop, jmp;
868+ unsigned long addr = br | 0xFFFFFFFFFFE00000UL, resolver;
869+
870+ addr = regs->pc + 4 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
871+ err = get_user(br2, (unsigned int *)addr);
872+ err |= get_user(ldq, (unsigned int *)(addr+4));
873+ err |= get_user(nop, (unsigned int *)(addr+8));
874+ err |= get_user(jmp, (unsigned int *)(addr+12));
875+ err |= get_user(resolver, (unsigned long *)(addr+16));
876+
877+ if (err)
878+ break;
879+
880+ if (br2 == 0xC3600000U &&
881+ ldq == 0xA77B000CU &&
882+ nop == 0x47FF041FU &&
883+ jmp == 0x6B7B0000U)
884+ {
885+ regs->r28 = regs->pc+4;
886+ regs->r27 = addr+16;
887+ regs->pc = resolver;
888+ return 3;
889+ }
890+ }
891+ } while (0);
892+#endif
893+
894+ return 1;
895+}
896+
897+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
898+{
899+ unsigned long i;
900+
901+ printk(KERN_ERR "PAX: bytes at PC: ");
902+ for (i = 0; i < 5; i++) {
903+ unsigned int c;
904+ if (get_user(c, (unsigned int *)pc+i))
905+ printk(KERN_CONT "???????? ");
906+ else
907+ printk(KERN_CONT "%08x ", c);
908+ }
909+ printk("\n");
910+}
911+#endif
912
913 /*
914 * This routine handles page faults. It determines the address,
915@@ -133,8 +251,29 @@ retry:
916 good_area:
917 si_code = SEGV_ACCERR;
918 if (cause < 0) {
919- if (!(vma->vm_flags & VM_EXEC))
920+ if (!(vma->vm_flags & VM_EXEC)) {
921+
922+#ifdef CONFIG_PAX_PAGEEXEC
923+ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->pc)
924+ goto bad_area;
925+
926+ up_read(&mm->mmap_sem);
927+ switch (pax_handle_fetch_fault(regs)) {
928+
929+#ifdef CONFIG_PAX_EMUPLT
930+ case 2:
931+ case 3:
932+ return;
933+#endif
934+
935+ }
936+ pax_report_fault(regs, (void *)regs->pc, (void *)rdusp());
937+ do_group_exit(SIGKILL);
938+#else
939 goto bad_area;
940+#endif
941+
942+ }
943 } else if (!cause) {
944 /* Allow reads even for write-only mappings */
945 if (!(vma->vm_flags & (VM_READ | VM_WRITE)))
946diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
947index cf4c0c9..a87ecf5 100644
948--- a/arch/arm/Kconfig
949+++ b/arch/arm/Kconfig
950@@ -1735,7 +1735,7 @@ config ALIGNMENT_TRAP
951
952 config UACCESS_WITH_MEMCPY
953 bool "Use kernel mem{cpy,set}() for {copy_to,clear}_user()"
954- depends on MMU
955+ depends on MMU && !PAX_MEMORY_UDEREF
956 default y if CPU_FEROCEON
957 help
958 Implement faster copy_to_user and clear_user methods for CPU
959@@ -1999,6 +1999,7 @@ config XIP_PHYS_ADDR
960 config KEXEC
961 bool "Kexec system call (EXPERIMENTAL)"
962 depends on (!SMP || PM_SLEEP_SMP)
963+ depends on !GRKERNSEC_KMEM
964 help
965 kexec is a system call that implements the ability to shutdown your
966 current kernel, and to start another kernel. It is like a reboot
967diff --git a/arch/arm/include/asm/atomic.h b/arch/arm/include/asm/atomic.h
968index e22c119..abe7041 100644
969--- a/arch/arm/include/asm/atomic.h
970+++ b/arch/arm/include/asm/atomic.h
971@@ -18,17 +18,41 @@
972 #include <asm/barrier.h>
973 #include <asm/cmpxchg.h>
974
975+#ifdef CONFIG_GENERIC_ATOMIC64
976+#include <asm-generic/atomic64.h>
977+#endif
978+
979 #define ATOMIC_INIT(i) { (i) }
980
981 #ifdef __KERNEL__
982
983+#ifdef CONFIG_THUMB2_KERNEL
984+#define REFCOUNT_TRAP_INSN "bkpt 0xf1"
985+#else
986+#define REFCOUNT_TRAP_INSN "bkpt 0xf103"
987+#endif
988+
989+#define _ASM_EXTABLE(from, to) \
990+" .pushsection __ex_table,\"a\"\n"\
991+" .align 3\n" \
992+" .long " #from ", " #to"\n" \
993+" .popsection"
994+
995 /*
996 * On ARM, ordinary assignment (str instruction) doesn't clear the local
997 * strex/ldrex monitor on some implementations. The reason we can use it for
998 * atomic_set() is the clrex or dummy strex done on every exception return.
999 */
1000 #define atomic_read(v) ACCESS_ONCE((v)->counter)
1001+static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
1002+{
1003+ return ACCESS_ONCE(v->counter);
1004+}
1005 #define atomic_set(v,i) (((v)->counter) = (i))
1006+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
1007+{
1008+ v->counter = i;
1009+}
1010
1011 #if __LINUX_ARM_ARCH__ >= 6
1012
1013@@ -38,26 +62,50 @@
1014 * to ensure that the update happens.
1015 */
1016
1017-#define ATOMIC_OP(op, c_op, asm_op) \
1018-static inline void atomic_##op(int i, atomic_t *v) \
1019+#ifdef CONFIG_PAX_REFCOUNT
1020+#define __OVERFLOW_POST \
1021+ " bvc 3f\n" \
1022+ "2: " REFCOUNT_TRAP_INSN "\n"\
1023+ "3:\n"
1024+#define __OVERFLOW_POST_RETURN \
1025+ " bvc 3f\n" \
1026+" mov %0, %1\n" \
1027+ "2: " REFCOUNT_TRAP_INSN "\n"\
1028+ "3:\n"
1029+#define __OVERFLOW_EXTABLE \
1030+ "4:\n" \
1031+ _ASM_EXTABLE(2b, 4b)
1032+#else
1033+#define __OVERFLOW_POST
1034+#define __OVERFLOW_POST_RETURN
1035+#define __OVERFLOW_EXTABLE
1036+#endif
1037+
1038+#define __ATOMIC_OP(op, suffix, c_op, asm_op, post_op, extable) \
1039+static inline void atomic_##op##suffix(int i, atomic##suffix##_t *v) \
1040 { \
1041 unsigned long tmp; \
1042 int result; \
1043 \
1044 prefetchw(&v->counter); \
1045- __asm__ __volatile__("@ atomic_" #op "\n" \
1046+ __asm__ __volatile__("@ atomic_" #op #suffix "\n" \
1047 "1: ldrex %0, [%3]\n" \
1048 " " #asm_op " %0, %0, %4\n" \
1049+ post_op \
1050 " strex %1, %0, [%3]\n" \
1051 " teq %1, #0\n" \
1052-" bne 1b" \
1053+" bne 1b\n" \
1054+ extable \
1055 : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter) \
1056 : "r" (&v->counter), "Ir" (i) \
1057 : "cc"); \
1058 } \
1059
1060-#define ATOMIC_OP_RETURN(op, c_op, asm_op) \
1061-static inline int atomic_##op##_return(int i, atomic_t *v) \
1062+#define ATOMIC_OP(op, c_op, asm_op) __ATOMIC_OP(op, _unchecked, c_op, asm_op, , )\
1063+ __ATOMIC_OP(op, , c_op, asm_op##s, __OVERFLOW_POST, __OVERFLOW_EXTABLE)
1064+
1065+#define __ATOMIC_OP_RETURN(op, suffix, c_op, asm_op, post_op, extable) \
1066+static inline int atomic_##op##_return##suffix(int i, atomic##suffix##_t *v)\
1067 { \
1068 unsigned long tmp; \
1069 int result; \
1070@@ -65,12 +113,14 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \
1071 smp_mb(); \
1072 prefetchw(&v->counter); \
1073 \
1074- __asm__ __volatile__("@ atomic_" #op "_return\n" \
1075+ __asm__ __volatile__("@ atomic_" #op "_return" #suffix "\n" \
1076 "1: ldrex %0, [%3]\n" \
1077 " " #asm_op " %0, %0, %4\n" \
1078+ post_op \
1079 " strex %1, %0, [%3]\n" \
1080 " teq %1, #0\n" \
1081-" bne 1b" \
1082+" bne 1b\n" \
1083+ extable \
1084 : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter) \
1085 : "r" (&v->counter), "Ir" (i) \
1086 : "cc"); \
1087@@ -80,6 +130,9 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \
1088 return result; \
1089 }
1090
1091+#define ATOMIC_OP_RETURN(op, c_op, asm_op) __ATOMIC_OP_RETURN(op, _unchecked, c_op, asm_op, , )\
1092+ __ATOMIC_OP_RETURN(op, , c_op, asm_op##s, __OVERFLOW_POST_RETURN, __OVERFLOW_EXTABLE)
1093+
1094 static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
1095 {
1096 int oldval;
1097@@ -115,12 +168,24 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
1098 __asm__ __volatile__ ("@ atomic_add_unless\n"
1099 "1: ldrex %0, [%4]\n"
1100 " teq %0, %5\n"
1101-" beq 2f\n"
1102-" add %1, %0, %6\n"
1103+" beq 4f\n"
1104+" adds %1, %0, %6\n"
1105+
1106+#ifdef CONFIG_PAX_REFCOUNT
1107+" bvc 3f\n"
1108+"2: " REFCOUNT_TRAP_INSN "\n"
1109+"3:\n"
1110+#endif
1111+
1112 " strex %2, %1, [%4]\n"
1113 " teq %2, #0\n"
1114 " bne 1b\n"
1115-"2:"
1116+"4:"
1117+
1118+#ifdef CONFIG_PAX_REFCOUNT
1119+ _ASM_EXTABLE(2b, 4b)
1120+#endif
1121+
1122 : "=&r" (oldval), "=&r" (newval), "=&r" (tmp), "+Qo" (v->counter)
1123 : "r" (&v->counter), "r" (u), "r" (a)
1124 : "cc");
1125@@ -131,14 +196,36 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
1126 return oldval;
1127 }
1128
1129+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *ptr, int old, int new)
1130+{
1131+ unsigned long oldval, res;
1132+
1133+ smp_mb();
1134+
1135+ do {
1136+ __asm__ __volatile__("@ atomic_cmpxchg_unchecked\n"
1137+ "ldrex %1, [%3]\n"
1138+ "mov %0, #0\n"
1139+ "teq %1, %4\n"
1140+ "strexeq %0, %5, [%3]\n"
1141+ : "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
1142+ : "r" (&ptr->counter), "Ir" (old), "r" (new)
1143+ : "cc");
1144+ } while (res);
1145+
1146+ smp_mb();
1147+
1148+ return oldval;
1149+}
1150+
1151 #else /* ARM_ARCH_6 */
1152
1153 #ifdef CONFIG_SMP
1154 #error SMP not supported on pre-ARMv6 CPUs
1155 #endif
1156
1157-#define ATOMIC_OP(op, c_op, asm_op) \
1158-static inline void atomic_##op(int i, atomic_t *v) \
1159+#define __ATOMIC_OP(op, suffix, c_op, asm_op) \
1160+static inline void atomic_##op##suffix(int i, atomic##suffix##_t *v) \
1161 { \
1162 unsigned long flags; \
1163 \
1164@@ -147,8 +234,11 @@ static inline void atomic_##op(int i, atomic_t *v) \
1165 raw_local_irq_restore(flags); \
1166 } \
1167
1168-#define ATOMIC_OP_RETURN(op, c_op, asm_op) \
1169-static inline int atomic_##op##_return(int i, atomic_t *v) \
1170+#define ATOMIC_OP(op, c_op, asm_op) __ATOMIC_OP(op, , c_op, asm_op) \
1171+ __ATOMIC_OP(op, _unchecked, c_op, asm_op)
1172+
1173+#define __ATOMIC_OP_RETURN(op, suffix, c_op, asm_op) \
1174+static inline int atomic_##op##_return##suffix(int i, atomic##suffix##_t *v)\
1175 { \
1176 unsigned long flags; \
1177 int val; \
1178@@ -161,6 +251,9 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \
1179 return val; \
1180 }
1181
1182+#define ATOMIC_OP_RETURN(op, c_op, asm_op) __ATOMIC_OP_RETURN(op, , c_op, asm_op)\
1183+ __ATOMIC_OP_RETURN(op, _unchecked, c_op, asm_op)
1184+
1185 static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
1186 {
1187 int ret;
1188@@ -175,6 +268,11 @@ static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
1189 return ret;
1190 }
1191
1192+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
1193+{
1194+ return atomic_cmpxchg((atomic_t *)v, old, new);
1195+}
1196+
1197 static inline int __atomic_add_unless(atomic_t *v, int a, int u)
1198 {
1199 int c, old;
1200@@ -196,16 +294,38 @@ ATOMIC_OPS(sub, -=, sub)
1201
1202 #undef ATOMIC_OPS
1203 #undef ATOMIC_OP_RETURN
1204+#undef __ATOMIC_OP_RETURN
1205 #undef ATOMIC_OP
1206+#undef __ATOMIC_OP
1207
1208 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
1209+static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
1210+{
1211+ return xchg(&v->counter, new);
1212+}
1213
1214 #define atomic_inc(v) atomic_add(1, v)
1215+static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
1216+{
1217+ atomic_add_unchecked(1, v);
1218+}
1219 #define atomic_dec(v) atomic_sub(1, v)
1220+static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
1221+{
1222+ atomic_sub_unchecked(1, v);
1223+}
1224
1225 #define atomic_inc_and_test(v) (atomic_add_return(1, v) == 0)
1226+static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
1227+{
1228+ return atomic_add_return_unchecked(1, v) == 0;
1229+}
1230 #define atomic_dec_and_test(v) (atomic_sub_return(1, v) == 0)
1231 #define atomic_inc_return(v) (atomic_add_return(1, v))
1232+static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
1233+{
1234+ return atomic_add_return_unchecked(1, v);
1235+}
1236 #define atomic_dec_return(v) (atomic_sub_return(1, v))
1237 #define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0)
1238
1239@@ -216,6 +336,14 @@ typedef struct {
1240 long long counter;
1241 } atomic64_t;
1242
1243+#ifdef CONFIG_PAX_REFCOUNT
1244+typedef struct {
1245+ long long counter;
1246+} atomic64_unchecked_t;
1247+#else
1248+typedef atomic64_t atomic64_unchecked_t;
1249+#endif
1250+
1251 #define ATOMIC64_INIT(i) { (i) }
1252
1253 #ifdef CONFIG_ARM_LPAE
1254@@ -232,6 +360,19 @@ static inline long long atomic64_read(const atomic64_t *v)
1255 return result;
1256 }
1257
1258+static inline long long atomic64_read_unchecked(const atomic64_unchecked_t *v)
1259+{
1260+ long long result;
1261+
1262+ __asm__ __volatile__("@ atomic64_read_unchecked\n"
1263+" ldrd %0, %H0, [%1]"
1264+ : "=&r" (result)
1265+ : "r" (&v->counter), "Qo" (v->counter)
1266+ );
1267+
1268+ return result;
1269+}
1270+
1271 static inline void atomic64_set(atomic64_t *v, long long i)
1272 {
1273 __asm__ __volatile__("@ atomic64_set\n"
1274@@ -240,6 +381,15 @@ static inline void atomic64_set(atomic64_t *v, long long i)
1275 : "r" (&v->counter), "r" (i)
1276 );
1277 }
1278+
1279+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long long i)
1280+{
1281+ __asm__ __volatile__("@ atomic64_set_unchecked\n"
1282+" strd %2, %H2, [%1]"
1283+ : "=Qo" (v->counter)
1284+ : "r" (&v->counter), "r" (i)
1285+ );
1286+}
1287 #else
1288 static inline long long atomic64_read(const atomic64_t *v)
1289 {
1290@@ -254,6 +404,19 @@ static inline long long atomic64_read(const atomic64_t *v)
1291 return result;
1292 }
1293
1294+static inline long long atomic64_read_unchecked(const atomic64_unchecked_t *v)
1295+{
1296+ long long result;
1297+
1298+ __asm__ __volatile__("@ atomic64_read_unchecked\n"
1299+" ldrexd %0, %H0, [%1]"
1300+ : "=&r" (result)
1301+ : "r" (&v->counter), "Qo" (v->counter)
1302+ );
1303+
1304+ return result;
1305+}
1306+
1307 static inline void atomic64_set(atomic64_t *v, long long i)
1308 {
1309 long long tmp;
1310@@ -268,29 +431,57 @@ static inline void atomic64_set(atomic64_t *v, long long i)
1311 : "r" (&v->counter), "r" (i)
1312 : "cc");
1313 }
1314+
1315+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long long i)
1316+{
1317+ long long tmp;
1318+
1319+ prefetchw(&v->counter);
1320+ __asm__ __volatile__("@ atomic64_set_unchecked\n"
1321+"1: ldrexd %0, %H0, [%2]\n"
1322+" strexd %0, %3, %H3, [%2]\n"
1323+" teq %0, #0\n"
1324+" bne 1b"
1325+ : "=&r" (tmp), "=Qo" (v->counter)
1326+ : "r" (&v->counter), "r" (i)
1327+ : "cc");
1328+}
1329 #endif
1330
1331-#define ATOMIC64_OP(op, op1, op2) \
1332-static inline void atomic64_##op(long long i, atomic64_t *v) \
1333+#undef __OVERFLOW_POST_RETURN
1334+#define __OVERFLOW_POST_RETURN \
1335+ " bvc 3f\n" \
1336+" mov %0, %1\n" \
1337+" mov %H0, %H1\n" \
1338+ "2: " REFCOUNT_TRAP_INSN "\n"\
1339+ "3:\n"
1340+
1341+#define __ATOMIC64_OP(op, suffix, op1, op2, post_op, extable) \
1342+static inline void atomic64_##op##suffix(long long i, atomic64##suffix##_t *v)\
1343 { \
1344 long long result; \
1345 unsigned long tmp; \
1346 \
1347 prefetchw(&v->counter); \
1348- __asm__ __volatile__("@ atomic64_" #op "\n" \
1349+ __asm__ __volatile__("@ atomic64_" #op #suffix "\n" \
1350 "1: ldrexd %0, %H0, [%3]\n" \
1351 " " #op1 " %Q0, %Q0, %Q4\n" \
1352 " " #op2 " %R0, %R0, %R4\n" \
1353+ post_op \
1354 " strexd %1, %0, %H0, [%3]\n" \
1355 " teq %1, #0\n" \
1356-" bne 1b" \
1357+" bne 1b\n" \
1358+ extable \
1359 : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter) \
1360 : "r" (&v->counter), "r" (i) \
1361 : "cc"); \
1362 } \
1363
1364-#define ATOMIC64_OP_RETURN(op, op1, op2) \
1365-static inline long long atomic64_##op##_return(long long i, atomic64_t *v) \
1366+#define ATOMIC64_OP(op, op1, op2) __ATOMIC64_OP(op, _unchecked, op1, op2, , ) \
1367+ __ATOMIC64_OP(op, , op1, op2##s, __OVERFLOW_POST, __OVERFLOW_EXTABLE)
1368+
1369+#define __ATOMIC64_OP_RETURN(op, suffix, op1, op2, post_op, extable) \
1370+static inline long long atomic64_##op##_return##suffix(long long i, atomic64##suffix##_t *v) \
1371 { \
1372 long long result; \
1373 unsigned long tmp; \
1374@@ -298,13 +489,15 @@ static inline long long atomic64_##op##_return(long long i, atomic64_t *v) \
1375 smp_mb(); \
1376 prefetchw(&v->counter); \
1377 \
1378- __asm__ __volatile__("@ atomic64_" #op "_return\n" \
1379+ __asm__ __volatile__("@ atomic64_" #op "_return" #suffix "\n" \
1380 "1: ldrexd %0, %H0, [%3]\n" \
1381 " " #op1 " %Q0, %Q0, %Q4\n" \
1382 " " #op2 " %R0, %R0, %R4\n" \
1383+ post_op \
1384 " strexd %1, %0, %H0, [%3]\n" \
1385 " teq %1, #0\n" \
1386-" bne 1b" \
1387+" bne 1b\n" \
1388+ extable \
1389 : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter) \
1390 : "r" (&v->counter), "r" (i) \
1391 : "cc"); \
1392@@ -314,6 +507,9 @@ static inline long long atomic64_##op##_return(long long i, atomic64_t *v) \
1393 return result; \
1394 }
1395
1396+#define ATOMIC64_OP_RETURN(op, op1, op2) __ATOMIC64_OP_RETURN(op, _unchecked, op1, op2, , ) \
1397+ __ATOMIC64_OP_RETURN(op, , op1, op2##s, __OVERFLOW_POST_RETURN, __OVERFLOW_EXTABLE)
1398+
1399 #define ATOMIC64_OPS(op, op1, op2) \
1400 ATOMIC64_OP(op, op1, op2) \
1401 ATOMIC64_OP_RETURN(op, op1, op2)
1402@@ -323,7 +519,12 @@ ATOMIC64_OPS(sub, subs, sbc)
1403
1404 #undef ATOMIC64_OPS
1405 #undef ATOMIC64_OP_RETURN
1406+#undef __ATOMIC64_OP_RETURN
1407 #undef ATOMIC64_OP
1408+#undef __ATOMIC64_OP
1409+#undef __OVERFLOW_EXTABLE
1410+#undef __OVERFLOW_POST_RETURN
1411+#undef __OVERFLOW_POST
1412
1413 static inline long long atomic64_cmpxchg(atomic64_t *ptr, long long old,
1414 long long new)
1415@@ -351,6 +552,31 @@ static inline long long atomic64_cmpxchg(atomic64_t *ptr, long long old,
1416 return oldval;
1417 }
1418
1419+static inline long long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *ptr, long long old,
1420+ long long new)
1421+{
1422+ long long oldval;
1423+ unsigned long res;
1424+
1425+ smp_mb();
1426+
1427+ do {
1428+ __asm__ __volatile__("@ atomic64_cmpxchg_unchecked\n"
1429+ "ldrexd %1, %H1, [%3]\n"
1430+ "mov %0, #0\n"
1431+ "teq %1, %4\n"
1432+ "teqeq %H1, %H4\n"
1433+ "strexdeq %0, %5, %H5, [%3]"
1434+ : "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
1435+ : "r" (&ptr->counter), "r" (old), "r" (new)
1436+ : "cc");
1437+ } while (res);
1438+
1439+ smp_mb();
1440+
1441+ return oldval;
1442+}
1443+
1444 static inline long long atomic64_xchg(atomic64_t *ptr, long long new)
1445 {
1446 long long result;
1447@@ -376,21 +602,35 @@ static inline long long atomic64_xchg(atomic64_t *ptr, long long new)
1448 static inline long long atomic64_dec_if_positive(atomic64_t *v)
1449 {
1450 long long result;
1451- unsigned long tmp;
1452+ u64 tmp;
1453
1454 smp_mb();
1455 prefetchw(&v->counter);
1456
1457 __asm__ __volatile__("@ atomic64_dec_if_positive\n"
1458-"1: ldrexd %0, %H0, [%3]\n"
1459-" subs %Q0, %Q0, #1\n"
1460-" sbc %R0, %R0, #0\n"
1461+"1: ldrexd %1, %H1, [%3]\n"
1462+" subs %Q0, %Q1, #1\n"
1463+" sbcs %R0, %R1, #0\n"
1464+
1465+#ifdef CONFIG_PAX_REFCOUNT
1466+" bvc 3f\n"
1467+" mov %Q0, %Q1\n"
1468+" mov %R0, %R1\n"
1469+"2: " REFCOUNT_TRAP_INSN "\n"
1470+"3:\n"
1471+#endif
1472+
1473 " teq %R0, #0\n"
1474-" bmi 2f\n"
1475+" bmi 4f\n"
1476 " strexd %1, %0, %H0, [%3]\n"
1477 " teq %1, #0\n"
1478 " bne 1b\n"
1479-"2:"
1480+"4:\n"
1481+
1482+#ifdef CONFIG_PAX_REFCOUNT
1483+ _ASM_EXTABLE(2b, 4b)
1484+#endif
1485+
1486 : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
1487 : "r" (&v->counter)
1488 : "cc");
1489@@ -414,13 +654,25 @@ static inline int atomic64_add_unless(atomic64_t *v, long long a, long long u)
1490 " teq %0, %5\n"
1491 " teqeq %H0, %H5\n"
1492 " moveq %1, #0\n"
1493-" beq 2f\n"
1494+" beq 4f\n"
1495 " adds %Q0, %Q0, %Q6\n"
1496-" adc %R0, %R0, %R6\n"
1497+" adcs %R0, %R0, %R6\n"
1498+
1499+#ifdef CONFIG_PAX_REFCOUNT
1500+" bvc 3f\n"
1501+"2: " REFCOUNT_TRAP_INSN "\n"
1502+"3:\n"
1503+#endif
1504+
1505 " strexd %2, %0, %H0, [%4]\n"
1506 " teq %2, #0\n"
1507 " bne 1b\n"
1508-"2:"
1509+"4:\n"
1510+
1511+#ifdef CONFIG_PAX_REFCOUNT
1512+ _ASM_EXTABLE(2b, 4b)
1513+#endif
1514+
1515 : "=&r" (val), "+r" (ret), "=&r" (tmp), "+Qo" (v->counter)
1516 : "r" (&v->counter), "r" (u), "r" (a)
1517 : "cc");
1518@@ -433,10 +685,13 @@ static inline int atomic64_add_unless(atomic64_t *v, long long a, long long u)
1519
1520 #define atomic64_add_negative(a, v) (atomic64_add_return((a), (v)) < 0)
1521 #define atomic64_inc(v) atomic64_add(1LL, (v))
1522+#define atomic64_inc_unchecked(v) atomic64_add_unchecked(1LL, (v))
1523 #define atomic64_inc_return(v) atomic64_add_return(1LL, (v))
1524+#define atomic64_inc_return_unchecked(v) atomic64_add_return_unchecked(1LL, (v))
1525 #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
1526 #define atomic64_sub_and_test(a, v) (atomic64_sub_return((a), (v)) == 0)
1527 #define atomic64_dec(v) atomic64_sub(1LL, (v))
1528+#define atomic64_dec_unchecked(v) atomic64_sub_unchecked(1LL, (v))
1529 #define atomic64_dec_return(v) atomic64_sub_return(1LL, (v))
1530 #define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0)
1531 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1LL, 0LL)
1532diff --git a/arch/arm/include/asm/barrier.h b/arch/arm/include/asm/barrier.h
1533index d2f81e6..3c4dba5 100644
1534--- a/arch/arm/include/asm/barrier.h
1535+++ b/arch/arm/include/asm/barrier.h
1536@@ -67,7 +67,7 @@
1537 do { \
1538 compiletime_assert_atomic_type(*p); \
1539 smp_mb(); \
1540- ACCESS_ONCE(*p) = (v); \
1541+ ACCESS_ONCE_RW(*p) = (v); \
1542 } while (0)
1543
1544 #define smp_load_acquire(p) \
1545diff --git a/arch/arm/include/asm/cache.h b/arch/arm/include/asm/cache.h
1546index 75fe66b..ba3dee4 100644
1547--- a/arch/arm/include/asm/cache.h
1548+++ b/arch/arm/include/asm/cache.h
1549@@ -4,8 +4,10 @@
1550 #ifndef __ASMARM_CACHE_H
1551 #define __ASMARM_CACHE_H
1552
1553+#include <linux/const.h>
1554+
1555 #define L1_CACHE_SHIFT CONFIG_ARM_L1_CACHE_SHIFT
1556-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
1557+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
1558
1559 /*
1560 * Memory returned by kmalloc() may be used for DMA, so we must make
1561@@ -24,5 +26,6 @@
1562 #endif
1563
1564 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
1565+#define __read_only __attribute__ ((__section__(".data..read_only")))
1566
1567 #endif
1568diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h
1569index 2d46862..a35415b 100644
1570--- a/arch/arm/include/asm/cacheflush.h
1571+++ b/arch/arm/include/asm/cacheflush.h
1572@@ -116,7 +116,7 @@ struct cpu_cache_fns {
1573 void (*dma_unmap_area)(const void *, size_t, int);
1574
1575 void (*dma_flush_range)(const void *, const void *);
1576-};
1577+} __no_const;
1578
1579 /*
1580 * Select the calling method
1581diff --git a/arch/arm/include/asm/checksum.h b/arch/arm/include/asm/checksum.h
1582index 5233151..87a71fa 100644
1583--- a/arch/arm/include/asm/checksum.h
1584+++ b/arch/arm/include/asm/checksum.h
1585@@ -37,7 +37,19 @@ __wsum
1586 csum_partial_copy_nocheck(const void *src, void *dst, int len, __wsum sum);
1587
1588 __wsum
1589-csum_partial_copy_from_user(const void __user *src, void *dst, int len, __wsum sum, int *err_ptr);
1590+__csum_partial_copy_from_user(const void __user *src, void *dst, int len, __wsum sum, int *err_ptr);
1591+
1592+static inline __wsum
1593+csum_partial_copy_from_user(const void __user *src, void *dst, int len, __wsum sum, int *err_ptr)
1594+{
1595+ __wsum ret;
1596+ pax_open_userland();
1597+ ret = __csum_partial_copy_from_user(src, dst, len, sum, err_ptr);
1598+ pax_close_userland();
1599+ return ret;
1600+}
1601+
1602+
1603
1604 /*
1605 * Fold a partial checksum without adding pseudo headers
1606diff --git a/arch/arm/include/asm/cmpxchg.h b/arch/arm/include/asm/cmpxchg.h
1607index abb2c37..96db950 100644
1608--- a/arch/arm/include/asm/cmpxchg.h
1609+++ b/arch/arm/include/asm/cmpxchg.h
1610@@ -104,6 +104,8 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size
1611
1612 #define xchg(ptr,x) \
1613 ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
1614+#define xchg_unchecked(ptr,x) \
1615+ ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
1616
1617 #include <asm-generic/cmpxchg-local.h>
1618
1619diff --git a/arch/arm/include/asm/domain.h b/arch/arm/include/asm/domain.h
1620index 6ddbe44..b5e38b1a 100644
1621--- a/arch/arm/include/asm/domain.h
1622+++ b/arch/arm/include/asm/domain.h
1623@@ -48,18 +48,37 @@
1624 * Domain types
1625 */
1626 #define DOMAIN_NOACCESS 0
1627-#define DOMAIN_CLIENT 1
1628 #ifdef CONFIG_CPU_USE_DOMAINS
1629+#define DOMAIN_USERCLIENT 1
1630+#define DOMAIN_KERNELCLIENT 1
1631 #define DOMAIN_MANAGER 3
1632+#define DOMAIN_VECTORS DOMAIN_USER
1633 #else
1634+
1635+#ifdef CONFIG_PAX_KERNEXEC
1636 #define DOMAIN_MANAGER 1
1637+#define DOMAIN_KERNEXEC 3
1638+#else
1639+#define DOMAIN_MANAGER 1
1640+#endif
1641+
1642+#ifdef CONFIG_PAX_MEMORY_UDEREF
1643+#define DOMAIN_USERCLIENT 0
1644+#define DOMAIN_UDEREF 1
1645+#define DOMAIN_VECTORS DOMAIN_KERNEL
1646+#else
1647+#define DOMAIN_USERCLIENT 1
1648+#define DOMAIN_VECTORS DOMAIN_USER
1649+#endif
1650+#define DOMAIN_KERNELCLIENT 1
1651+
1652 #endif
1653
1654 #define domain_val(dom,type) ((type) << (2*(dom)))
1655
1656 #ifndef __ASSEMBLY__
1657
1658-#ifdef CONFIG_CPU_USE_DOMAINS
1659+#if defined(CONFIG_CPU_USE_DOMAINS) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
1660 static inline void set_domain(unsigned val)
1661 {
1662 asm volatile(
1663@@ -68,15 +87,7 @@ static inline void set_domain(unsigned val)
1664 isb();
1665 }
1666
1667-#define modify_domain(dom,type) \
1668- do { \
1669- struct thread_info *thread = current_thread_info(); \
1670- unsigned int domain = thread->cpu_domain; \
1671- domain &= ~domain_val(dom, DOMAIN_MANAGER); \
1672- thread->cpu_domain = domain | domain_val(dom, type); \
1673- set_domain(thread->cpu_domain); \
1674- } while (0)
1675-
1676+extern void modify_domain(unsigned int dom, unsigned int type);
1677 #else
1678 static inline void set_domain(unsigned val) { }
1679 static inline void modify_domain(unsigned dom, unsigned type) { }
1680diff --git a/arch/arm/include/asm/elf.h b/arch/arm/include/asm/elf.h
1681index 674d03f..9a0bac0 100644
1682--- a/arch/arm/include/asm/elf.h
1683+++ b/arch/arm/include/asm/elf.h
1684@@ -115,7 +115,14 @@ int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs);
1685 the loader. We need to make sure that it is out of the way of the program
1686 that it will "exec", and that there is sufficient room for the brk. */
1687
1688-#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
1689+#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
1690+
1691+#ifdef CONFIG_PAX_ASLR
1692+#define PAX_ELF_ET_DYN_BASE 0x00008000UL
1693+
1694+#define PAX_DELTA_MMAP_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
1695+#define PAX_DELTA_STACK_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
1696+#endif
1697
1698 /* When the program starts, a1 contains a pointer to a function to be
1699 registered with atexit, as per the SVR4 ABI. A value of 0 means we
1700@@ -125,10 +132,6 @@ int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs);
1701 extern void elf_set_personality(const struct elf32_hdr *);
1702 #define SET_PERSONALITY(ex) elf_set_personality(&(ex))
1703
1704-struct mm_struct;
1705-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
1706-#define arch_randomize_brk arch_randomize_brk
1707-
1708 #ifdef CONFIG_MMU
1709 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
1710 struct linux_binprm;
1711diff --git a/arch/arm/include/asm/fncpy.h b/arch/arm/include/asm/fncpy.h
1712index de53547..52b9a28 100644
1713--- a/arch/arm/include/asm/fncpy.h
1714+++ b/arch/arm/include/asm/fncpy.h
1715@@ -81,7 +81,9 @@
1716 BUG_ON((uintptr_t)(dest_buf) & (FNCPY_ALIGN - 1) || \
1717 (__funcp_address & ~(uintptr_t)1 & (FNCPY_ALIGN - 1))); \
1718 \
1719+ pax_open_kernel(); \
1720 memcpy(dest_buf, (void const *)(__funcp_address & ~1), size); \
1721+ pax_close_kernel(); \
1722 flush_icache_range((unsigned long)(dest_buf), \
1723 (unsigned long)(dest_buf) + (size)); \
1724 \
1725diff --git a/arch/arm/include/asm/futex.h b/arch/arm/include/asm/futex.h
1726index 53e69da..3fdc896 100644
1727--- a/arch/arm/include/asm/futex.h
1728+++ b/arch/arm/include/asm/futex.h
1729@@ -46,6 +46,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
1730 if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
1731 return -EFAULT;
1732
1733+ pax_open_userland();
1734+
1735 smp_mb();
1736 /* Prefetching cannot fault */
1737 prefetchw(uaddr);
1738@@ -63,6 +65,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
1739 : "cc", "memory");
1740 smp_mb();
1741
1742+ pax_close_userland();
1743+
1744 *uval = val;
1745 return ret;
1746 }
1747@@ -93,6 +97,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
1748 if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
1749 return -EFAULT;
1750
1751+ pax_open_userland();
1752+
1753 __asm__ __volatile__("@futex_atomic_cmpxchg_inatomic\n"
1754 "1: " TUSER(ldr) " %1, [%4]\n"
1755 " teq %1, %2\n"
1756@@ -103,6 +109,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
1757 : "r" (oldval), "r" (newval), "r" (uaddr), "Ir" (-EFAULT)
1758 : "cc", "memory");
1759
1760+ pax_close_userland();
1761+
1762 *uval = val;
1763 return ret;
1764 }
1765@@ -125,6 +133,7 @@ futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
1766 return -EFAULT;
1767
1768 pagefault_disable(); /* implies preempt_disable() */
1769+ pax_open_userland();
1770
1771 switch (op) {
1772 case FUTEX_OP_SET:
1773@@ -146,6 +155,7 @@ futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
1774 ret = -ENOSYS;
1775 }
1776
1777+ pax_close_userland();
1778 pagefault_enable(); /* subsumes preempt_enable() */
1779
1780 if (!ret) {
1781diff --git a/arch/arm/include/asm/kmap_types.h b/arch/arm/include/asm/kmap_types.h
1782index 83eb2f7..ed77159 100644
1783--- a/arch/arm/include/asm/kmap_types.h
1784+++ b/arch/arm/include/asm/kmap_types.h
1785@@ -4,6 +4,6 @@
1786 /*
1787 * This is the "bare minimum". AIO seems to require this.
1788 */
1789-#define KM_TYPE_NR 16
1790+#define KM_TYPE_NR 17
1791
1792 #endif
1793diff --git a/arch/arm/include/asm/mach/dma.h b/arch/arm/include/asm/mach/dma.h
1794index 9e614a1..3302cca 100644
1795--- a/arch/arm/include/asm/mach/dma.h
1796+++ b/arch/arm/include/asm/mach/dma.h
1797@@ -22,7 +22,7 @@ struct dma_ops {
1798 int (*residue)(unsigned int, dma_t *); /* optional */
1799 int (*setspeed)(unsigned int, dma_t *, int); /* optional */
1800 const char *type;
1801-};
1802+} __do_const;
1803
1804 struct dma_struct {
1805 void *addr; /* single DMA address */
1806diff --git a/arch/arm/include/asm/mach/map.h b/arch/arm/include/asm/mach/map.h
1807index f98c7f3..e5c626d 100644
1808--- a/arch/arm/include/asm/mach/map.h
1809+++ b/arch/arm/include/asm/mach/map.h
1810@@ -23,17 +23,19 @@ struct map_desc {
1811
1812 /* types 0-3 are defined in asm/io.h */
1813 enum {
1814- MT_UNCACHED = 4,
1815- MT_CACHECLEAN,
1816- MT_MINICLEAN,
1817+ MT_UNCACHED_RW = 4,
1818+ MT_CACHECLEAN_RO,
1819+ MT_MINICLEAN_RO,
1820 MT_LOW_VECTORS,
1821 MT_HIGH_VECTORS,
1822- MT_MEMORY_RWX,
1823+ __MT_MEMORY_RWX,
1824 MT_MEMORY_RW,
1825- MT_ROM,
1826- MT_MEMORY_RWX_NONCACHED,
1827+ MT_MEMORY_RX,
1828+ MT_ROM_RX,
1829+ MT_MEMORY_RW_NONCACHED,
1830+ MT_MEMORY_RX_NONCACHED,
1831 MT_MEMORY_RW_DTCM,
1832- MT_MEMORY_RWX_ITCM,
1833+ MT_MEMORY_RX_ITCM,
1834 MT_MEMORY_RW_SO,
1835 MT_MEMORY_DMA_READY,
1836 };
1837diff --git a/arch/arm/include/asm/outercache.h b/arch/arm/include/asm/outercache.h
1838index 563b92f..689d58e 100644
1839--- a/arch/arm/include/asm/outercache.h
1840+++ b/arch/arm/include/asm/outercache.h
1841@@ -39,7 +39,7 @@ struct outer_cache_fns {
1842 /* This is an ARM L2C thing */
1843 void (*write_sec)(unsigned long, unsigned);
1844 void (*configure)(const struct l2x0_regs *);
1845-};
1846+} __no_const;
1847
1848 extern struct outer_cache_fns outer_cache;
1849
1850diff --git a/arch/arm/include/asm/page.h b/arch/arm/include/asm/page.h
1851index 4355f0e..cd9168e 100644
1852--- a/arch/arm/include/asm/page.h
1853+++ b/arch/arm/include/asm/page.h
1854@@ -23,6 +23,7 @@
1855
1856 #else
1857
1858+#include <linux/compiler.h>
1859 #include <asm/glue.h>
1860
1861 /*
1862@@ -114,7 +115,7 @@ struct cpu_user_fns {
1863 void (*cpu_clear_user_highpage)(struct page *page, unsigned long vaddr);
1864 void (*cpu_copy_user_highpage)(struct page *to, struct page *from,
1865 unsigned long vaddr, struct vm_area_struct *vma);
1866-};
1867+} __no_const;
1868
1869 #ifdef MULTI_USER
1870 extern struct cpu_user_fns cpu_user;
1871diff --git a/arch/arm/include/asm/pgalloc.h b/arch/arm/include/asm/pgalloc.h
1872index 19cfab5..3f5c7e9 100644
1873--- a/arch/arm/include/asm/pgalloc.h
1874+++ b/arch/arm/include/asm/pgalloc.h
1875@@ -17,6 +17,7 @@
1876 #include <asm/processor.h>
1877 #include <asm/cacheflush.h>
1878 #include <asm/tlbflush.h>
1879+#include <asm/system_info.h>
1880
1881 #define check_pgt_cache() do { } while (0)
1882
1883@@ -43,6 +44,11 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
1884 set_pud(pud, __pud(__pa(pmd) | PMD_TYPE_TABLE));
1885 }
1886
1887+static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
1888+{
1889+ pud_populate(mm, pud, pmd);
1890+}
1891+
1892 #else /* !CONFIG_ARM_LPAE */
1893
1894 /*
1895@@ -51,6 +57,7 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
1896 #define pmd_alloc_one(mm,addr) ({ BUG(); ((pmd_t *)2); })
1897 #define pmd_free(mm, pmd) do { } while (0)
1898 #define pud_populate(mm,pmd,pte) BUG()
1899+#define pud_populate_kernel(mm,pmd,pte) BUG()
1900
1901 #endif /* CONFIG_ARM_LPAE */
1902
1903@@ -128,6 +135,19 @@ static inline void pte_free(struct mm_struct *mm, pgtable_t pte)
1904 __free_page(pte);
1905 }
1906
1907+static inline void __section_update(pmd_t *pmdp, unsigned long addr, pmdval_t prot)
1908+{
1909+#ifdef CONFIG_ARM_LPAE
1910+ pmdp[0] = __pmd(pmd_val(pmdp[0]) | prot);
1911+#else
1912+ if (addr & SECTION_SIZE)
1913+ pmdp[1] = __pmd(pmd_val(pmdp[1]) | prot);
1914+ else
1915+ pmdp[0] = __pmd(pmd_val(pmdp[0]) | prot);
1916+#endif
1917+ flush_pmd_entry(pmdp);
1918+}
1919+
1920 static inline void __pmd_populate(pmd_t *pmdp, phys_addr_t pte,
1921 pmdval_t prot)
1922 {
1923diff --git a/arch/arm/include/asm/pgtable-2level-hwdef.h b/arch/arm/include/asm/pgtable-2level-hwdef.h
1924index 5e68278..1869bae 100644
1925--- a/arch/arm/include/asm/pgtable-2level-hwdef.h
1926+++ b/arch/arm/include/asm/pgtable-2level-hwdef.h
1927@@ -27,7 +27,7 @@
1928 /*
1929 * - section
1930 */
1931-#define PMD_SECT_PXN (_AT(pmdval_t, 1) << 0) /* v7 */
1932+#define PMD_SECT_PXN (_AT(pmdval_t, 1) << 0) /* v7 */
1933 #define PMD_SECT_BUFFERABLE (_AT(pmdval_t, 1) << 2)
1934 #define PMD_SECT_CACHEABLE (_AT(pmdval_t, 1) << 3)
1935 #define PMD_SECT_XN (_AT(pmdval_t, 1) << 4) /* v6 */
1936@@ -39,6 +39,7 @@
1937 #define PMD_SECT_nG (_AT(pmdval_t, 1) << 17) /* v6 */
1938 #define PMD_SECT_SUPER (_AT(pmdval_t, 1) << 18) /* v6 */
1939 #define PMD_SECT_AF (_AT(pmdval_t, 0))
1940+#define PMD_SECT_RDONLY (_AT(pmdval_t, 0))
1941
1942 #define PMD_SECT_UNCACHED (_AT(pmdval_t, 0))
1943 #define PMD_SECT_BUFFERED (PMD_SECT_BUFFERABLE)
1944@@ -68,6 +69,7 @@
1945 * - extended small page/tiny page
1946 */
1947 #define PTE_EXT_XN (_AT(pteval_t, 1) << 0) /* v6 */
1948+#define PTE_EXT_PXN (_AT(pteval_t, 1) << 2) /* v7 */
1949 #define PTE_EXT_AP_MASK (_AT(pteval_t, 3) << 4)
1950 #define PTE_EXT_AP0 (_AT(pteval_t, 1) << 4)
1951 #define PTE_EXT_AP1 (_AT(pteval_t, 2) << 4)
1952diff --git a/arch/arm/include/asm/pgtable-2level.h b/arch/arm/include/asm/pgtable-2level.h
1953index bfd662e..f6cbb02 100644
1954--- a/arch/arm/include/asm/pgtable-2level.h
1955+++ b/arch/arm/include/asm/pgtable-2level.h
1956@@ -127,6 +127,9 @@
1957 #define L_PTE_SHARED (_AT(pteval_t, 1) << 10) /* shared(v6), coherent(xsc3) */
1958 #define L_PTE_NONE (_AT(pteval_t, 1) << 11)
1959
1960+/* Two-level page tables only have PXN in the PGD, not in the PTE. */
1961+#define L_PTE_PXN (_AT(pteval_t, 0))
1962+
1963 /*
1964 * These are the memory types, defined to be compatible with
1965 * pre-ARMv6 CPUs cacheable and bufferable bits: XXCB
1966diff --git a/arch/arm/include/asm/pgtable-3level.h b/arch/arm/include/asm/pgtable-3level.h
1967index a745a2a..481350a 100644
1968--- a/arch/arm/include/asm/pgtable-3level.h
1969+++ b/arch/arm/include/asm/pgtable-3level.h
1970@@ -80,6 +80,7 @@
1971 #define L_PTE_USER (_AT(pteval_t, 1) << 6) /* AP[1] */
1972 #define L_PTE_SHARED (_AT(pteval_t, 3) << 8) /* SH[1:0], inner shareable */
1973 #define L_PTE_YOUNG (_AT(pteval_t, 1) << 10) /* AF */
1974+#define L_PTE_PXN (_AT(pteval_t, 1) << 53) /* PXN */
1975 #define L_PTE_XN (_AT(pteval_t, 1) << 54) /* XN */
1976 #define L_PTE_DIRTY (_AT(pteval_t, 1) << 55)
1977 #define L_PTE_SPECIAL (_AT(pteval_t, 1) << 56)
1978@@ -91,10 +92,12 @@
1979 #define L_PMD_SECT_SPLITTING (_AT(pmdval_t, 1) << 56)
1980 #define L_PMD_SECT_NONE (_AT(pmdval_t, 1) << 57)
1981 #define L_PMD_SECT_RDONLY (_AT(pteval_t, 1) << 58)
1982+#define PMD_SECT_RDONLY PMD_SECT_AP2
1983
1984 /*
1985 * To be used in assembly code with the upper page attributes.
1986 */
1987+#define L_PTE_PXN_HIGH (1 << (53 - 32))
1988 #define L_PTE_XN_HIGH (1 << (54 - 32))
1989 #define L_PTE_DIRTY_HIGH (1 << (55 - 32))
1990
1991diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h
1992index f403541..b10df68 100644
1993--- a/arch/arm/include/asm/pgtable.h
1994+++ b/arch/arm/include/asm/pgtable.h
1995@@ -33,6 +33,9 @@
1996 #include <asm/pgtable-2level.h>
1997 #endif
1998
1999+#define ktla_ktva(addr) (addr)
2000+#define ktva_ktla(addr) (addr)
2001+
2002 /*
2003 * Just any arbitrary offset to the start of the vmalloc VM area: the
2004 * current 8MB value just means that there will be a 8MB "hole" after the
2005@@ -48,6 +51,9 @@
2006 #define LIBRARY_TEXT_START 0x0c000000
2007
2008 #ifndef __ASSEMBLY__
2009+extern pteval_t __supported_pte_mask;
2010+extern pmdval_t __supported_pmd_mask;
2011+
2012 extern void __pte_error(const char *file, int line, pte_t);
2013 extern void __pmd_error(const char *file, int line, pmd_t);
2014 extern void __pgd_error(const char *file, int line, pgd_t);
2015@@ -56,6 +62,48 @@ extern void __pgd_error(const char *file, int line, pgd_t);
2016 #define pmd_ERROR(pmd) __pmd_error(__FILE__, __LINE__, pmd)
2017 #define pgd_ERROR(pgd) __pgd_error(__FILE__, __LINE__, pgd)
2018
2019+#define __HAVE_ARCH_PAX_OPEN_KERNEL
2020+#define __HAVE_ARCH_PAX_CLOSE_KERNEL
2021+
2022+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2023+#include <asm/domain.h>
2024+#include <linux/thread_info.h>
2025+#include <linux/preempt.h>
2026+
2027+static inline int test_domain(int domain, int domaintype)
2028+{
2029+ return ((current_thread_info()->cpu_domain) & domain_val(domain, 3)) == domain_val(domain, domaintype);
2030+}
2031+#endif
2032+
2033+#ifdef CONFIG_PAX_KERNEXEC
2034+static inline unsigned long pax_open_kernel(void) {
2035+#ifdef CONFIG_ARM_LPAE
2036+ /* TODO */
2037+#else
2038+ preempt_disable();
2039+ BUG_ON(test_domain(DOMAIN_KERNEL, DOMAIN_KERNEXEC));
2040+ modify_domain(DOMAIN_KERNEL, DOMAIN_KERNEXEC);
2041+#endif
2042+ return 0;
2043+}
2044+
2045+static inline unsigned long pax_close_kernel(void) {
2046+#ifdef CONFIG_ARM_LPAE
2047+ /* TODO */
2048+#else
2049+ BUG_ON(test_domain(DOMAIN_KERNEL, DOMAIN_MANAGER));
2050+ /* DOMAIN_MANAGER = "client" under KERNEXEC */
2051+ modify_domain(DOMAIN_KERNEL, DOMAIN_MANAGER);
2052+ preempt_enable_no_resched();
2053+#endif
2054+ return 0;
2055+}
2056+#else
2057+static inline unsigned long pax_open_kernel(void) { return 0; }
2058+static inline unsigned long pax_close_kernel(void) { return 0; }
2059+#endif
2060+
2061 /*
2062 * This is the lowest virtual address we can permit any user space
2063 * mapping to be mapped at. This is particularly important for
2064@@ -75,8 +123,8 @@ extern void __pgd_error(const char *file, int line, pgd_t);
2065 /*
2066 * The pgprot_* and protection_map entries will be fixed up in runtime
2067 * to include the cachable and bufferable bits based on memory policy,
2068- * as well as any architecture dependent bits like global/ASID and SMP
2069- * shared mapping bits.
2070+ * as well as any architecture dependent bits like global/ASID, PXN,
2071+ * and SMP shared mapping bits.
2072 */
2073 #define _L_PTE_DEFAULT L_PTE_PRESENT | L_PTE_YOUNG
2074
2075@@ -307,7 +355,7 @@ static inline pte_t pte_mknexec(pte_t pte)
2076 static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
2077 {
2078 const pteval_t mask = L_PTE_XN | L_PTE_RDONLY | L_PTE_USER |
2079- L_PTE_NONE | L_PTE_VALID;
2080+ L_PTE_NONE | L_PTE_VALID | __supported_pte_mask;
2081 pte_val(pte) = (pte_val(pte) & ~mask) | (pgprot_val(newprot) & mask);
2082 return pte;
2083 }
2084diff --git a/arch/arm/include/asm/psci.h b/arch/arm/include/asm/psci.h
2085index c25ef3e..735f14b 100644
2086--- a/arch/arm/include/asm/psci.h
2087+++ b/arch/arm/include/asm/psci.h
2088@@ -32,7 +32,7 @@ struct psci_operations {
2089 int (*affinity_info)(unsigned long target_affinity,
2090 unsigned long lowest_affinity_level);
2091 int (*migrate_info_type)(void);
2092-};
2093+} __no_const;
2094
2095 extern struct psci_operations psci_ops;
2096 extern struct smp_operations psci_smp_ops;
2097diff --git a/arch/arm/include/asm/smp.h b/arch/arm/include/asm/smp.h
2098index 18f5a55..5072a40 100644
2099--- a/arch/arm/include/asm/smp.h
2100+++ b/arch/arm/include/asm/smp.h
2101@@ -107,7 +107,7 @@ struct smp_operations {
2102 int (*cpu_disable)(unsigned int cpu);
2103 #endif
2104 #endif
2105-};
2106+} __no_const;
2107
2108 struct of_cpu_method {
2109 const char *method;
2110diff --git a/arch/arm/include/asm/thread_info.h b/arch/arm/include/asm/thread_info.h
2111index 72812a1..335f4f3 100644
2112--- a/arch/arm/include/asm/thread_info.h
2113+++ b/arch/arm/include/asm/thread_info.h
2114@@ -77,9 +77,9 @@ struct thread_info {
2115 .flags = 0, \
2116 .preempt_count = INIT_PREEMPT_COUNT, \
2117 .addr_limit = KERNEL_DS, \
2118- .cpu_domain = domain_val(DOMAIN_USER, DOMAIN_MANAGER) | \
2119- domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) | \
2120- domain_val(DOMAIN_IO, DOMAIN_CLIENT), \
2121+ .cpu_domain = domain_val(DOMAIN_USER, DOMAIN_USERCLIENT) | \
2122+ domain_val(DOMAIN_KERNEL, DOMAIN_KERNELCLIENT) | \
2123+ domain_val(DOMAIN_IO, DOMAIN_KERNELCLIENT), \
2124 }
2125
2126 #define init_thread_info (init_thread_union.thread_info)
2127@@ -155,7 +155,11 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *,
2128 #define TIF_SYSCALL_AUDIT 9
2129 #define TIF_SYSCALL_TRACEPOINT 10
2130 #define TIF_SECCOMP 11 /* seccomp syscall filtering active */
2131-#define TIF_NOHZ 12 /* in adaptive nohz mode */
2132+/* within 8 bits of TIF_SYSCALL_TRACE
2133+ * to meet flexible second operand requirements
2134+ */
2135+#define TIF_GRSEC_SETXID 12
2136+#define TIF_NOHZ 13 /* in adaptive nohz mode */
2137 #define TIF_USING_IWMMXT 17
2138 #define TIF_MEMDIE 18 /* is terminating due to OOM killer */
2139 #define TIF_RESTORE_SIGMASK 20
2140@@ -169,10 +173,11 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *,
2141 #define _TIF_SYSCALL_TRACEPOINT (1 << TIF_SYSCALL_TRACEPOINT)
2142 #define _TIF_SECCOMP (1 << TIF_SECCOMP)
2143 #define _TIF_USING_IWMMXT (1 << TIF_USING_IWMMXT)
2144+#define _TIF_GRSEC_SETXID (1 << TIF_GRSEC_SETXID)
2145
2146 /* Checks for any syscall work in entry-common.S */
2147 #define _TIF_SYSCALL_WORK (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \
2148- _TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP)
2149+ _TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP | _TIF_GRSEC_SETXID)
2150
2151 /*
2152 * Change these and you break ASM code in entry-common.S
2153diff --git a/arch/arm/include/asm/tls.h b/arch/arm/include/asm/tls.h
2154index 5f833f7..76e6644 100644
2155--- a/arch/arm/include/asm/tls.h
2156+++ b/arch/arm/include/asm/tls.h
2157@@ -3,6 +3,7 @@
2158
2159 #include <linux/compiler.h>
2160 #include <asm/thread_info.h>
2161+#include <asm/pgtable.h>
2162
2163 #ifdef __ASSEMBLY__
2164 #include <asm/asm-offsets.h>
2165@@ -89,7 +90,9 @@ static inline void set_tls(unsigned long val)
2166 * at 0xffff0fe0 must be used instead. (see
2167 * entry-armv.S for details)
2168 */
2169+ pax_open_kernel();
2170 *((unsigned int *)0xffff0ff0) = val;
2171+ pax_close_kernel();
2172 #endif
2173 }
2174
2175diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h
2176index ce0786e..a80c264 100644
2177--- a/arch/arm/include/asm/uaccess.h
2178+++ b/arch/arm/include/asm/uaccess.h
2179@@ -18,6 +18,7 @@
2180 #include <asm/domain.h>
2181 #include <asm/unified.h>
2182 #include <asm/compiler.h>
2183+#include <asm/pgtable.h>
2184
2185 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
2186 #include <asm-generic/uaccess-unaligned.h>
2187@@ -70,11 +71,38 @@ extern int __put_user_bad(void);
2188 static inline void set_fs(mm_segment_t fs)
2189 {
2190 current_thread_info()->addr_limit = fs;
2191- modify_domain(DOMAIN_KERNEL, fs ? DOMAIN_CLIENT : DOMAIN_MANAGER);
2192+ modify_domain(DOMAIN_KERNEL, fs ? DOMAIN_KERNELCLIENT : DOMAIN_MANAGER);
2193 }
2194
2195 #define segment_eq(a, b) ((a) == (b))
2196
2197+#define __HAVE_ARCH_PAX_OPEN_USERLAND
2198+#define __HAVE_ARCH_PAX_CLOSE_USERLAND
2199+
2200+static inline void pax_open_userland(void)
2201+{
2202+
2203+#ifdef CONFIG_PAX_MEMORY_UDEREF
2204+ if (segment_eq(get_fs(), USER_DS)) {
2205+ BUG_ON(test_domain(DOMAIN_USER, DOMAIN_UDEREF));
2206+ modify_domain(DOMAIN_USER, DOMAIN_UDEREF);
2207+ }
2208+#endif
2209+
2210+}
2211+
2212+static inline void pax_close_userland(void)
2213+{
2214+
2215+#ifdef CONFIG_PAX_MEMORY_UDEREF
2216+ if (segment_eq(get_fs(), USER_DS)) {
2217+ BUG_ON(test_domain(DOMAIN_USER, DOMAIN_NOACCESS));
2218+ modify_domain(DOMAIN_USER, DOMAIN_NOACCESS);
2219+ }
2220+#endif
2221+
2222+}
2223+
2224 #define __addr_ok(addr) ({ \
2225 unsigned long flag; \
2226 __asm__("cmp %2, %0; movlo %0, #0" \
2227@@ -198,8 +226,12 @@ extern int __get_user_64t_4(void *);
2228
2229 #define get_user(x, p) \
2230 ({ \
2231+ int __e; \
2232 might_fault(); \
2233- __get_user_check(x, p); \
2234+ pax_open_userland(); \
2235+ __e = __get_user_check((x), (p)); \
2236+ pax_close_userland(); \
2237+ __e; \
2238 })
2239
2240 extern int __put_user_1(void *, unsigned int);
2241@@ -244,8 +276,12 @@ extern int __put_user_8(void *, unsigned long long);
2242
2243 #define put_user(x, p) \
2244 ({ \
2245+ int __e; \
2246 might_fault(); \
2247- __put_user_check(x, p); \
2248+ pax_open_userland(); \
2249+ __e = __put_user_check((x), (p)); \
2250+ pax_close_userland(); \
2251+ __e; \
2252 })
2253
2254 #else /* CONFIG_MMU */
2255@@ -269,6 +305,7 @@ static inline void set_fs(mm_segment_t fs)
2256
2257 #endif /* CONFIG_MMU */
2258
2259+#define access_ok_noprefault(type, addr, size) access_ok((type), (addr), (size))
2260 #define access_ok(type, addr, size) (__range_ok(addr, size) == 0)
2261
2262 #define user_addr_max() \
2263@@ -286,13 +323,17 @@ static inline void set_fs(mm_segment_t fs)
2264 #define __get_user(x, ptr) \
2265 ({ \
2266 long __gu_err = 0; \
2267+ pax_open_userland(); \
2268 __get_user_err((x), (ptr), __gu_err); \
2269+ pax_close_userland(); \
2270 __gu_err; \
2271 })
2272
2273 #define __get_user_error(x, ptr, err) \
2274 ({ \
2275+ pax_open_userland(); \
2276 __get_user_err((x), (ptr), err); \
2277+ pax_close_userland(); \
2278 (void) 0; \
2279 })
2280
2281@@ -368,13 +409,17 @@ do { \
2282 #define __put_user(x, ptr) \
2283 ({ \
2284 long __pu_err = 0; \
2285+ pax_open_userland(); \
2286 __put_user_err((x), (ptr), __pu_err); \
2287+ pax_close_userland(); \
2288 __pu_err; \
2289 })
2290
2291 #define __put_user_error(x, ptr, err) \
2292 ({ \
2293+ pax_open_userland(); \
2294 __put_user_err((x), (ptr), err); \
2295+ pax_close_userland(); \
2296 (void) 0; \
2297 })
2298
2299@@ -474,11 +519,44 @@ do { \
2300
2301
2302 #ifdef CONFIG_MMU
2303-extern unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n);
2304-extern unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n);
2305+extern unsigned long __must_check ___copy_from_user(void *to, const void __user *from, unsigned long n);
2306+extern unsigned long __must_check ___copy_to_user(void __user *to, const void *from, unsigned long n);
2307+
2308+static inline unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n)
2309+{
2310+ unsigned long ret;
2311+
2312+ check_object_size(to, n, false);
2313+ pax_open_userland();
2314+ ret = ___copy_from_user(to, from, n);
2315+ pax_close_userland();
2316+ return ret;
2317+}
2318+
2319+static inline unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n)
2320+{
2321+ unsigned long ret;
2322+
2323+ check_object_size(from, n, true);
2324+ pax_open_userland();
2325+ ret = ___copy_to_user(to, from, n);
2326+ pax_close_userland();
2327+ return ret;
2328+}
2329+
2330 extern unsigned long __must_check __copy_to_user_std(void __user *to, const void *from, unsigned long n);
2331-extern unsigned long __must_check __clear_user(void __user *addr, unsigned long n);
2332+extern unsigned long __must_check ___clear_user(void __user *addr, unsigned long n);
2333 extern unsigned long __must_check __clear_user_std(void __user *addr, unsigned long n);
2334+
2335+static inline unsigned long __must_check __clear_user(void __user *addr, unsigned long n)
2336+{
2337+ unsigned long ret;
2338+ pax_open_userland();
2339+ ret = ___clear_user(addr, n);
2340+ pax_close_userland();
2341+ return ret;
2342+}
2343+
2344 #else
2345 #define __copy_from_user(to, from, n) (memcpy(to, (void __force *)from, n), 0)
2346 #define __copy_to_user(to, from, n) (memcpy((void __force *)to, from, n), 0)
2347@@ -487,6 +565,9 @@ extern unsigned long __must_check __clear_user_std(void __user *addr, unsigned l
2348
2349 static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
2350 {
2351+ if ((long)n < 0)
2352+ return n;
2353+
2354 if (access_ok(VERIFY_READ, from, n))
2355 n = __copy_from_user(to, from, n);
2356 else /* security hole - plug it */
2357@@ -496,6 +577,9 @@ static inline unsigned long __must_check copy_from_user(void *to, const void __u
2358
2359 static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
2360 {
2361+ if ((long)n < 0)
2362+ return n;
2363+
2364 if (access_ok(VERIFY_WRITE, to, n))
2365 n = __copy_to_user(to, from, n);
2366 return n;
2367diff --git a/arch/arm/include/uapi/asm/ptrace.h b/arch/arm/include/uapi/asm/ptrace.h
2368index 5af0ed1..cea83883 100644
2369--- a/arch/arm/include/uapi/asm/ptrace.h
2370+++ b/arch/arm/include/uapi/asm/ptrace.h
2371@@ -92,7 +92,7 @@
2372 * ARMv7 groups of PSR bits
2373 */
2374 #define APSR_MASK 0xf80f0000 /* N, Z, C, V, Q and GE flags */
2375-#define PSR_ISET_MASK 0x01000010 /* ISA state (J, T) mask */
2376+#define PSR_ISET_MASK 0x01000020 /* ISA state (J, T) mask */
2377 #define PSR_IT_MASK 0x0600fc00 /* If-Then execution state mask */
2378 #define PSR_ENDIAN_MASK 0x00000200 /* Endianness state mask */
2379
2380diff --git a/arch/arm/kernel/armksyms.c b/arch/arm/kernel/armksyms.c
2381index a88671c..1cc895e 100644
2382--- a/arch/arm/kernel/armksyms.c
2383+++ b/arch/arm/kernel/armksyms.c
2384@@ -55,7 +55,7 @@ EXPORT_SYMBOL(arm_delay_ops);
2385
2386 /* networking */
2387 EXPORT_SYMBOL(csum_partial);
2388-EXPORT_SYMBOL(csum_partial_copy_from_user);
2389+EXPORT_SYMBOL(__csum_partial_copy_from_user);
2390 EXPORT_SYMBOL(csum_partial_copy_nocheck);
2391 EXPORT_SYMBOL(__csum_ipv6_magic);
2392
2393@@ -91,9 +91,9 @@ EXPORT_SYMBOL(__memzero);
2394 #ifdef CONFIG_MMU
2395 EXPORT_SYMBOL(copy_page);
2396
2397-EXPORT_SYMBOL(__copy_from_user);
2398-EXPORT_SYMBOL(__copy_to_user);
2399-EXPORT_SYMBOL(__clear_user);
2400+EXPORT_SYMBOL(___copy_from_user);
2401+EXPORT_SYMBOL(___copy_to_user);
2402+EXPORT_SYMBOL(___clear_user);
2403
2404 EXPORT_SYMBOL(__get_user_1);
2405 EXPORT_SYMBOL(__get_user_2);
2406diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
2407index 672b219..4aa120a 100644
2408--- a/arch/arm/kernel/entry-armv.S
2409+++ b/arch/arm/kernel/entry-armv.S
2410@@ -48,6 +48,87 @@
2411 9997:
2412 .endm
2413
2414+ .macro pax_enter_kernel
2415+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2416+ @ make aligned space for saved DACR
2417+ sub sp, sp, #8
2418+ @ save regs
2419+ stmdb sp!, {r1, r2}
2420+ @ read DACR from cpu_domain into r1
2421+ mov r2, sp
2422+ @ assume 8K pages, since we have to split the immediate in two
2423+ bic r2, r2, #(0x1fc0)
2424+ bic r2, r2, #(0x3f)
2425+ ldr r1, [r2, #TI_CPU_DOMAIN]
2426+ @ store old DACR on stack
2427+ str r1, [sp, #8]
2428+#ifdef CONFIG_PAX_KERNEXEC
2429+ @ set type of DOMAIN_KERNEL to DOMAIN_KERNELCLIENT
2430+ bic r1, r1, #(domain_val(DOMAIN_KERNEL, 3))
2431+ orr r1, r1, #(domain_val(DOMAIN_KERNEL, DOMAIN_KERNELCLIENT))
2432+#endif
2433+#ifdef CONFIG_PAX_MEMORY_UDEREF
2434+ @ set current DOMAIN_USER to DOMAIN_NOACCESS
2435+ bic r1, r1, #(domain_val(DOMAIN_USER, 3))
2436+#endif
2437+ @ write r1 to current_thread_info()->cpu_domain
2438+ str r1, [r2, #TI_CPU_DOMAIN]
2439+ @ write r1 to DACR
2440+ mcr p15, 0, r1, c3, c0, 0
2441+ @ instruction sync
2442+ instr_sync
2443+ @ restore regs
2444+ ldmia sp!, {r1, r2}
2445+#endif
2446+ .endm
2447+
2448+ .macro pax_open_userland
2449+#ifdef CONFIG_PAX_MEMORY_UDEREF
2450+ @ save regs
2451+ stmdb sp!, {r0, r1}
2452+ @ read DACR from cpu_domain into r1
2453+ mov r0, sp
2454+ @ assume 8K pages, since we have to split the immediate in two
2455+ bic r0, r0, #(0x1fc0)
2456+ bic r0, r0, #(0x3f)
2457+ ldr r1, [r0, #TI_CPU_DOMAIN]
2458+ @ set current DOMAIN_USER to DOMAIN_CLIENT
2459+ bic r1, r1, #(domain_val(DOMAIN_USER, 3))
2460+ orr r1, r1, #(domain_val(DOMAIN_USER, DOMAIN_UDEREF))
2461+ @ write r1 to current_thread_info()->cpu_domain
2462+ str r1, [r0, #TI_CPU_DOMAIN]
2463+ @ write r1 to DACR
2464+ mcr p15, 0, r1, c3, c0, 0
2465+ @ instruction sync
2466+ instr_sync
2467+ @ restore regs
2468+ ldmia sp!, {r0, r1}
2469+#endif
2470+ .endm
2471+
2472+ .macro pax_close_userland
2473+#ifdef CONFIG_PAX_MEMORY_UDEREF
2474+ @ save regs
2475+ stmdb sp!, {r0, r1}
2476+ @ read DACR from cpu_domain into r1
2477+ mov r0, sp
2478+ @ assume 8K pages, since we have to split the immediate in two
2479+ bic r0, r0, #(0x1fc0)
2480+ bic r0, r0, #(0x3f)
2481+ ldr r1, [r0, #TI_CPU_DOMAIN]
2482+ @ set current DOMAIN_USER to DOMAIN_NOACCESS
2483+ bic r1, r1, #(domain_val(DOMAIN_USER, 3))
2484+ @ write r1 to current_thread_info()->cpu_domain
2485+ str r1, [r0, #TI_CPU_DOMAIN]
2486+ @ write r1 to DACR
2487+ mcr p15, 0, r1, c3, c0, 0
2488+ @ instruction sync
2489+ instr_sync
2490+ @ restore regs
2491+ ldmia sp!, {r0, r1}
2492+#endif
2493+ .endm
2494+
2495 .macro pabt_helper
2496 @ PABORT handler takes pt_regs in r2, fault address in r4 and psr in r5
2497 #ifdef MULTI_PABORT
2498@@ -90,11 +171,15 @@
2499 * Invalid mode handlers
2500 */
2501 .macro inv_entry, reason
2502+
2503+ pax_enter_kernel
2504+
2505 sub sp, sp, #S_FRAME_SIZE
2506 ARM( stmib sp, {r1 - lr} )
2507 THUMB( stmia sp, {r0 - r12} )
2508 THUMB( str sp, [sp, #S_SP] )
2509 THUMB( str lr, [sp, #S_LR] )
2510+
2511 mov r1, #\reason
2512 .endm
2513
2514@@ -150,7 +235,11 @@ ENDPROC(__und_invalid)
2515 .macro svc_entry, stack_hole=0, trace=1
2516 UNWIND(.fnstart )
2517 UNWIND(.save {r0 - pc} )
2518+
2519+ pax_enter_kernel
2520+
2521 sub sp, sp, #(S_FRAME_SIZE + \stack_hole - 4)
2522+
2523 #ifdef CONFIG_THUMB2_KERNEL
2524 SPFIX( str r0, [sp] ) @ temporarily saved
2525 SPFIX( mov r0, sp )
2526@@ -165,7 +254,12 @@ ENDPROC(__und_invalid)
2527 ldmia r0, {r3 - r5}
2528 add r7, sp, #S_SP - 4 @ here for interlock avoidance
2529 mov r6, #-1 @ "" "" "" ""
2530+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2531+ @ offset sp by 8 as done in pax_enter_kernel
2532+ add r2, sp, #(S_FRAME_SIZE + \stack_hole + 4)
2533+#else
2534 add r2, sp, #(S_FRAME_SIZE + \stack_hole - 4)
2535+#endif
2536 SPFIX( addeq r2, r2, #4 )
2537 str r3, [sp, #-4]! @ save the "real" r0 copied
2538 @ from the exception stack
2539@@ -369,6 +463,9 @@ ENDPROC(__fiq_abt)
2540 .macro usr_entry, trace=1
2541 UNWIND(.fnstart )
2542 UNWIND(.cantunwind ) @ don't unwind the user space
2543+
2544+ pax_enter_kernel_user
2545+
2546 sub sp, sp, #S_FRAME_SIZE
2547 ARM( stmib sp, {r1 - r12} )
2548 THUMB( stmia sp, {r0 - r12} )
2549@@ -479,7 +576,9 @@ __und_usr:
2550 tst r3, #PSR_T_BIT @ Thumb mode?
2551 bne __und_usr_thumb
2552 sub r4, r2, #4 @ ARM instr at LR - 4
2553+ pax_open_userland
2554 1: ldrt r0, [r4]
2555+ pax_close_userland
2556 ARM_BE8(rev r0, r0) @ little endian instruction
2557
2558 @ r0 = 32-bit ARM instruction which caused the exception
2559@@ -513,11 +612,15 @@ __und_usr_thumb:
2560 */
2561 .arch armv6t2
2562 #endif
2563+ pax_open_userland
2564 2: ldrht r5, [r4]
2565+ pax_close_userland
2566 ARM_BE8(rev16 r5, r5) @ little endian instruction
2567 cmp r5, #0xe800 @ 32bit instruction if xx != 0
2568 blo __und_usr_fault_16 @ 16bit undefined instruction
2569+ pax_open_userland
2570 3: ldrht r0, [r2]
2571+ pax_close_userland
2572 ARM_BE8(rev16 r0, r0) @ little endian instruction
2573 add r2, r2, #2 @ r2 is PC + 2, make it PC + 4
2574 str r2, [sp, #S_PC] @ it's a 2x16bit instr, update
2575@@ -547,7 +650,8 @@ ENDPROC(__und_usr)
2576 */
2577 .pushsection .fixup, "ax"
2578 .align 2
2579-4: str r4, [sp, #S_PC] @ retry current instruction
2580+4: pax_close_userland
2581+ str r4, [sp, #S_PC] @ retry current instruction
2582 ret r9
2583 .popsection
2584 .pushsection __ex_table,"a"
2585@@ -767,7 +871,7 @@ ENTRY(__switch_to)
2586 THUMB( str lr, [ip], #4 )
2587 ldr r4, [r2, #TI_TP_VALUE]
2588 ldr r5, [r2, #TI_TP_VALUE + 4]
2589-#ifdef CONFIG_CPU_USE_DOMAINS
2590+#if defined(CONFIG_CPU_USE_DOMAINS) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2591 ldr r6, [r2, #TI_CPU_DOMAIN]
2592 #endif
2593 switch_tls r1, r4, r5, r3, r7
2594@@ -776,7 +880,7 @@ ENTRY(__switch_to)
2595 ldr r8, =__stack_chk_guard
2596 ldr r7, [r7, #TSK_STACK_CANARY]
2597 #endif
2598-#ifdef CONFIG_CPU_USE_DOMAINS
2599+#if defined(CONFIG_CPU_USE_DOMAINS) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2600 mcr p15, 0, r6, c3, c0, 0 @ Set domain register
2601 #endif
2602 mov r5, r0
2603diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S
2604index f8ccc21..83d192f 100644
2605--- a/arch/arm/kernel/entry-common.S
2606+++ b/arch/arm/kernel/entry-common.S
2607@@ -11,18 +11,46 @@
2608 #include <asm/assembler.h>
2609 #include <asm/unistd.h>
2610 #include <asm/ftrace.h>
2611+#include <asm/domain.h>
2612 #include <asm/unwind.h>
2613
2614+#include "entry-header.S"
2615+
2616 #ifdef CONFIG_NEED_RET_TO_USER
2617 #include <mach/entry-macro.S>
2618 #else
2619 .macro arch_ret_to_user, tmp1, tmp2
2620+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2621+ @ save regs
2622+ stmdb sp!, {r1, r2}
2623+ @ read DACR from cpu_domain into r1
2624+ mov r2, sp
2625+ @ assume 8K pages, since we have to split the immediate in two
2626+ bic r2, r2, #(0x1fc0)
2627+ bic r2, r2, #(0x3f)
2628+ ldr r1, [r2, #TI_CPU_DOMAIN]
2629+#ifdef CONFIG_PAX_KERNEXEC
2630+ @ set type of DOMAIN_KERNEL to DOMAIN_KERNELCLIENT
2631+ bic r1, r1, #(domain_val(DOMAIN_KERNEL, 3))
2632+ orr r1, r1, #(domain_val(DOMAIN_KERNEL, DOMAIN_KERNELCLIENT))
2633+#endif
2634+#ifdef CONFIG_PAX_MEMORY_UDEREF
2635+ @ set current DOMAIN_USER to DOMAIN_UDEREF
2636+ bic r1, r1, #(domain_val(DOMAIN_USER, 3))
2637+ orr r1, r1, #(domain_val(DOMAIN_USER, DOMAIN_UDEREF))
2638+#endif
2639+ @ write r1 to current_thread_info()->cpu_domain
2640+ str r1, [r2, #TI_CPU_DOMAIN]
2641+ @ write r1 to DACR
2642+ mcr p15, 0, r1, c3, c0, 0
2643+ @ instruction sync
2644+ instr_sync
2645+ @ restore regs
2646+ ldmia sp!, {r1, r2}
2647+#endif
2648 .endm
2649 #endif
2650
2651-#include "entry-header.S"
2652-
2653-
2654 .align 5
2655 /*
2656 * This is the fast syscall return path. We do as little as
2657@@ -171,6 +199,12 @@ ENTRY(vector_swi)
2658 USER( ldr scno, [lr, #-4] ) @ get SWI instruction
2659 #endif
2660
2661+ /*
2662+ * do this here to avoid a performance hit of wrapping the code above
2663+ * that directly dereferences userland to parse the SWI instruction
2664+ */
2665+ pax_enter_kernel_user
2666+
2667 adr tbl, sys_call_table @ load syscall table pointer
2668
2669 #if defined(CONFIG_OABI_COMPAT)
2670diff --git a/arch/arm/kernel/entry-header.S b/arch/arm/kernel/entry-header.S
2671index 1a0045a..9b4f34d 100644
2672--- a/arch/arm/kernel/entry-header.S
2673+++ b/arch/arm/kernel/entry-header.S
2674@@ -196,6 +196,60 @@
2675 msr cpsr_c, \rtemp @ switch back to the SVC mode
2676 .endm
2677
2678+ .macro pax_enter_kernel_user
2679+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2680+ @ save regs
2681+ stmdb sp!, {r0, r1}
2682+ @ read DACR from cpu_domain into r1
2683+ mov r0, sp
2684+ @ assume 8K pages, since we have to split the immediate in two
2685+ bic r0, r0, #(0x1fc0)
2686+ bic r0, r0, #(0x3f)
2687+ ldr r1, [r0, #TI_CPU_DOMAIN]
2688+#ifdef CONFIG_PAX_MEMORY_UDEREF
2689+ @ set current DOMAIN_USER to DOMAIN_NOACCESS
2690+ bic r1, r1, #(domain_val(DOMAIN_USER, 3))
2691+#endif
2692+#ifdef CONFIG_PAX_KERNEXEC
2693+ @ set current DOMAIN_KERNEL to DOMAIN_KERNELCLIENT
2694+ bic r1, r1, #(domain_val(DOMAIN_KERNEL, 3))
2695+ orr r1, r1, #(domain_val(DOMAIN_KERNEL, DOMAIN_KERNELCLIENT))
2696+#endif
2697+ @ write r1 to current_thread_info()->cpu_domain
2698+ str r1, [r0, #TI_CPU_DOMAIN]
2699+ @ write r1 to DACR
2700+ mcr p15, 0, r1, c3, c0, 0
2701+ @ instruction sync
2702+ instr_sync
2703+ @ restore regs
2704+ ldmia sp!, {r0, r1}
2705+#endif
2706+ .endm
2707+
2708+ .macro pax_exit_kernel
2709+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2710+ @ save regs
2711+ stmdb sp!, {r0, r1}
2712+ @ read old DACR from stack into r1
2713+ ldr r1, [sp, #(8 + S_SP)]
2714+ sub r1, r1, #8
2715+ ldr r1, [r1]
2716+
2717+ @ write r1 to current_thread_info()->cpu_domain
2718+ mov r0, sp
2719+ @ assume 8K pages, since we have to split the immediate in two
2720+ bic r0, r0, #(0x1fc0)
2721+ bic r0, r0, #(0x3f)
2722+ str r1, [r0, #TI_CPU_DOMAIN]
2723+ @ write r1 to DACR
2724+ mcr p15, 0, r1, c3, c0, 0
2725+ @ instruction sync
2726+ instr_sync
2727+ @ restore regs
2728+ ldmia sp!, {r0, r1}
2729+#endif
2730+ .endm
2731+
2732 #ifndef CONFIG_THUMB2_KERNEL
2733 .macro svc_exit, rpsr, irq = 0
2734 .if \irq != 0
2735@@ -215,6 +269,9 @@
2736 blne trace_hardirqs_off
2737 #endif
2738 .endif
2739+
2740+ pax_exit_kernel
2741+
2742 msr spsr_cxsf, \rpsr
2743 #if defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_32v6K)
2744 @ We must avoid clrex due to Cortex-A15 erratum #830321
2745@@ -291,6 +348,9 @@
2746 blne trace_hardirqs_off
2747 #endif
2748 .endif
2749+
2750+ pax_exit_kernel
2751+
2752 ldr lr, [sp, #S_SP] @ top of the stack
2753 ldrd r0, r1, [sp, #S_LR] @ calling lr and pc
2754
2755diff --git a/arch/arm/kernel/fiq.c b/arch/arm/kernel/fiq.c
2756index 059c3da..8e45cfc 100644
2757--- a/arch/arm/kernel/fiq.c
2758+++ b/arch/arm/kernel/fiq.c
2759@@ -95,7 +95,10 @@ void set_fiq_handler(void *start, unsigned int length)
2760 void *base = vectors_page;
2761 unsigned offset = FIQ_OFFSET;
2762
2763+ pax_open_kernel();
2764 memcpy(base + offset, start, length);
2765+ pax_close_kernel();
2766+
2767 if (!cache_is_vipt_nonaliasing())
2768 flush_icache_range((unsigned long)base + offset, offset +
2769 length);
2770diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S
2771index 0196327..50ac8895 100644
2772--- a/arch/arm/kernel/head.S
2773+++ b/arch/arm/kernel/head.S
2774@@ -444,7 +444,7 @@ __enable_mmu:
2775 mov r5, #(domain_val(DOMAIN_USER, DOMAIN_MANAGER) | \
2776 domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) | \
2777 domain_val(DOMAIN_TABLE, DOMAIN_MANAGER) | \
2778- domain_val(DOMAIN_IO, DOMAIN_CLIENT))
2779+ domain_val(DOMAIN_IO, DOMAIN_KERNELCLIENT))
2780 mcr p15, 0, r5, c3, c0, 0 @ load domain access register
2781 mcr p15, 0, r4, c2, c0, 0 @ load page table pointer
2782 #endif
2783diff --git a/arch/arm/kernel/module.c b/arch/arm/kernel/module.c
2784index 2e11961..07f0704 100644
2785--- a/arch/arm/kernel/module.c
2786+++ b/arch/arm/kernel/module.c
2787@@ -38,12 +38,39 @@
2788 #endif
2789
2790 #ifdef CONFIG_MMU
2791-void *module_alloc(unsigned long size)
2792+static inline void *__module_alloc(unsigned long size, pgprot_t prot)
2793 {
2794+ if (!size || PAGE_ALIGN(size) > MODULES_END - MODULES_VADDR)
2795+ return NULL;
2796 return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
2797- GFP_KERNEL, PAGE_KERNEL_EXEC, 0, NUMA_NO_NODE,
2798+ GFP_KERNEL, prot, 0, NUMA_NO_NODE,
2799 __builtin_return_address(0));
2800 }
2801+
2802+void *module_alloc(unsigned long size)
2803+{
2804+
2805+#ifdef CONFIG_PAX_KERNEXEC
2806+ return __module_alloc(size, PAGE_KERNEL);
2807+#else
2808+ return __module_alloc(size, PAGE_KERNEL_EXEC);
2809+#endif
2810+
2811+}
2812+
2813+#ifdef CONFIG_PAX_KERNEXEC
2814+void module_memfree_exec(void *module_region)
2815+{
2816+ module_memfree(module_region);
2817+}
2818+EXPORT_SYMBOL(module_memfree_exec);
2819+
2820+void *module_alloc_exec(unsigned long size)
2821+{
2822+ return __module_alloc(size, PAGE_KERNEL_EXEC);
2823+}
2824+EXPORT_SYMBOL(module_alloc_exec);
2825+#endif
2826 #endif
2827
2828 int
2829diff --git a/arch/arm/kernel/patch.c b/arch/arm/kernel/patch.c
2830index 69bda1a..755113a 100644
2831--- a/arch/arm/kernel/patch.c
2832+++ b/arch/arm/kernel/patch.c
2833@@ -66,6 +66,7 @@ void __kprobes __patch_text_real(void *addr, unsigned int insn, bool remap)
2834 else
2835 __acquire(&patch_lock);
2836
2837+ pax_open_kernel();
2838 if (thumb2 && __opcode_is_thumb16(insn)) {
2839 *(u16 *)waddr = __opcode_to_mem_thumb16(insn);
2840 size = sizeof(u16);
2841@@ -97,6 +98,7 @@ void __kprobes __patch_text_real(void *addr, unsigned int insn, bool remap)
2842 *(u32 *)waddr = insn;
2843 size = sizeof(u32);
2844 }
2845+ pax_close_kernel();
2846
2847 if (waddr != addr) {
2848 flush_kernel_vmap_range(waddr, twopage ? size / 2 : size);
2849diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
2850index 2bf1a16..d959d40 100644
2851--- a/arch/arm/kernel/process.c
2852+++ b/arch/arm/kernel/process.c
2853@@ -213,6 +213,7 @@ void machine_power_off(void)
2854
2855 if (pm_power_off)
2856 pm_power_off();
2857+ BUG();
2858 }
2859
2860 /*
2861@@ -226,7 +227,7 @@ void machine_power_off(void)
2862 * executing pre-reset code, and using RAM that the primary CPU's code wishes
2863 * to use. Implementing such co-ordination would be essentially impossible.
2864 */
2865-void machine_restart(char *cmd)
2866+__noreturn void machine_restart(char *cmd)
2867 {
2868 local_irq_disable();
2869 smp_send_stop();
2870@@ -252,8 +253,8 @@ void __show_regs(struct pt_regs *regs)
2871
2872 show_regs_print_info(KERN_DEFAULT);
2873
2874- print_symbol("PC is at %s\n", instruction_pointer(regs));
2875- print_symbol("LR is at %s\n", regs->ARM_lr);
2876+ printk("PC is at %pA\n", (void *)instruction_pointer(regs));
2877+ printk("LR is at %pA\n", (void *)regs->ARM_lr);
2878 printk("pc : [<%08lx>] lr : [<%08lx>] psr: %08lx\n"
2879 "sp : %08lx ip : %08lx fp : %08lx\n",
2880 regs->ARM_pc, regs->ARM_lr, regs->ARM_cpsr,
2881@@ -430,12 +431,6 @@ unsigned long get_wchan(struct task_struct *p)
2882 return 0;
2883 }
2884
2885-unsigned long arch_randomize_brk(struct mm_struct *mm)
2886-{
2887- unsigned long range_end = mm->brk + 0x02000000;
2888- return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
2889-}
2890-
2891 #ifdef CONFIG_MMU
2892 #ifdef CONFIG_KUSER_HELPERS
2893 /*
2894@@ -451,7 +446,7 @@ static struct vm_area_struct gate_vma = {
2895
2896 static int __init gate_vma_init(void)
2897 {
2898- gate_vma.vm_page_prot = PAGE_READONLY_EXEC;
2899+ gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
2900 return 0;
2901 }
2902 arch_initcall(gate_vma_init);
2903@@ -480,81 +475,13 @@ const char *arch_vma_name(struct vm_area_struct *vma)
2904 return is_gate_vma(vma) ? "[vectors]" : NULL;
2905 }
2906
2907-/* If possible, provide a placement hint at a random offset from the
2908- * stack for the signal page.
2909- */
2910-static unsigned long sigpage_addr(const struct mm_struct *mm,
2911- unsigned int npages)
2912-{
2913- unsigned long offset;
2914- unsigned long first;
2915- unsigned long last;
2916- unsigned long addr;
2917- unsigned int slots;
2918-
2919- first = PAGE_ALIGN(mm->start_stack);
2920-
2921- last = TASK_SIZE - (npages << PAGE_SHIFT);
2922-
2923- /* No room after stack? */
2924- if (first > last)
2925- return 0;
2926-
2927- /* Just enough room? */
2928- if (first == last)
2929- return first;
2930-
2931- slots = ((last - first) >> PAGE_SHIFT) + 1;
2932-
2933- offset = get_random_int() % slots;
2934-
2935- addr = first + (offset << PAGE_SHIFT);
2936-
2937- return addr;
2938-}
2939-
2940-static struct page *signal_page;
2941-extern struct page *get_signal_page(void);
2942-
2943-static const struct vm_special_mapping sigpage_mapping = {
2944- .name = "[sigpage]",
2945- .pages = &signal_page,
2946-};
2947-
2948 int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
2949 {
2950 struct mm_struct *mm = current->mm;
2951- struct vm_area_struct *vma;
2952- unsigned long addr;
2953- unsigned long hint;
2954- int ret = 0;
2955-
2956- if (!signal_page)
2957- signal_page = get_signal_page();
2958- if (!signal_page)
2959- return -ENOMEM;
2960
2961 down_write(&mm->mmap_sem);
2962- hint = sigpage_addr(mm, 1);
2963- addr = get_unmapped_area(NULL, hint, PAGE_SIZE, 0, 0);
2964- if (IS_ERR_VALUE(addr)) {
2965- ret = addr;
2966- goto up_fail;
2967- }
2968-
2969- vma = _install_special_mapping(mm, addr, PAGE_SIZE,
2970- VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC,
2971- &sigpage_mapping);
2972-
2973- if (IS_ERR(vma)) {
2974- ret = PTR_ERR(vma);
2975- goto up_fail;
2976- }
2977-
2978- mm->context.sigpage = addr;
2979-
2980- up_fail:
2981+ mm->context.sigpage = (PAGE_OFFSET + (get_random_int() % 0x3FFEFFE0)) & 0xFFFFFFFC;
2982 up_write(&mm->mmap_sem);
2983- return ret;
2984+ return 0;
2985 }
2986 #endif
2987diff --git a/arch/arm/kernel/psci.c b/arch/arm/kernel/psci.c
2988index f90fdf4..24e8c84 100644
2989--- a/arch/arm/kernel/psci.c
2990+++ b/arch/arm/kernel/psci.c
2991@@ -26,7 +26,7 @@
2992 #include <asm/psci.h>
2993 #include <asm/system_misc.h>
2994
2995-struct psci_operations psci_ops;
2996+struct psci_operations psci_ops __read_only;
2997
2998 static int (*invoke_psci_fn)(u32, u32, u32, u32);
2999 typedef int (*psci_initcall_t)(const struct device_node *);
3000diff --git a/arch/arm/kernel/ptrace.c b/arch/arm/kernel/ptrace.c
3001index ef9119f..31995a3 100644
3002--- a/arch/arm/kernel/ptrace.c
3003+++ b/arch/arm/kernel/ptrace.c
3004@@ -928,10 +928,19 @@ static void tracehook_report_syscall(struct pt_regs *regs,
3005 regs->ARM_ip = ip;
3006 }
3007
3008+#ifdef CONFIG_GRKERNSEC_SETXID
3009+extern void gr_delayed_cred_worker(void);
3010+#endif
3011+
3012 asmlinkage int syscall_trace_enter(struct pt_regs *regs, int scno)
3013 {
3014 current_thread_info()->syscall = scno;
3015
3016+#ifdef CONFIG_GRKERNSEC_SETXID
3017+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
3018+ gr_delayed_cred_worker();
3019+#endif
3020+
3021 /* Do the secure computing check first; failures should be fast. */
3022 #ifdef CONFIG_HAVE_ARCH_SECCOMP_FILTER
3023 if (secure_computing() == -1)
3024diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
3025index 1d60beb..4aa25d5 100644
3026--- a/arch/arm/kernel/setup.c
3027+++ b/arch/arm/kernel/setup.c
3028@@ -105,21 +105,23 @@ EXPORT_SYMBOL(elf_hwcap);
3029 unsigned int elf_hwcap2 __read_mostly;
3030 EXPORT_SYMBOL(elf_hwcap2);
3031
3032+pteval_t __supported_pte_mask __read_only;
3033+pmdval_t __supported_pmd_mask __read_only;
3034
3035 #ifdef MULTI_CPU
3036-struct processor processor __read_mostly;
3037+struct processor processor __read_only;
3038 #endif
3039 #ifdef MULTI_TLB
3040-struct cpu_tlb_fns cpu_tlb __read_mostly;
3041+struct cpu_tlb_fns cpu_tlb __read_only;
3042 #endif
3043 #ifdef MULTI_USER
3044-struct cpu_user_fns cpu_user __read_mostly;
3045+struct cpu_user_fns cpu_user __read_only;
3046 #endif
3047 #ifdef MULTI_CACHE
3048-struct cpu_cache_fns cpu_cache __read_mostly;
3049+struct cpu_cache_fns cpu_cache __read_only;
3050 #endif
3051 #ifdef CONFIG_OUTER_CACHE
3052-struct outer_cache_fns outer_cache __read_mostly;
3053+struct outer_cache_fns outer_cache __read_only;
3054 EXPORT_SYMBOL(outer_cache);
3055 #endif
3056
3057@@ -250,9 +252,13 @@ static int __get_cpu_architecture(void)
3058 * Register 0 and check for VMSAv7 or PMSAv7 */
3059 unsigned int mmfr0 = read_cpuid_ext(CPUID_EXT_MMFR0);
3060 if ((mmfr0 & 0x0000000f) >= 0x00000003 ||
3061- (mmfr0 & 0x000000f0) >= 0x00000030)
3062+ (mmfr0 & 0x000000f0) >= 0x00000030) {
3063 cpu_arch = CPU_ARCH_ARMv7;
3064- else if ((mmfr0 & 0x0000000f) == 0x00000002 ||
3065+ if ((mmfr0 & 0x0000000f) == 0x00000005 || (mmfr0 & 0x0000000f) == 0x00000004) {
3066+ __supported_pte_mask |= L_PTE_PXN;
3067+ __supported_pmd_mask |= PMD_PXNTABLE;
3068+ }
3069+ } else if ((mmfr0 & 0x0000000f) == 0x00000002 ||
3070 (mmfr0 & 0x000000f0) == 0x00000020)
3071 cpu_arch = CPU_ARCH_ARMv6;
3072 else
3073diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c
3074index 023ac90..0a69950 100644
3075--- a/arch/arm/kernel/signal.c
3076+++ b/arch/arm/kernel/signal.c
3077@@ -24,8 +24,6 @@
3078
3079 extern const unsigned long sigreturn_codes[7];
3080
3081-static unsigned long signal_return_offset;
3082-
3083 #ifdef CONFIG_CRUNCH
3084 static int preserve_crunch_context(struct crunch_sigframe __user *frame)
3085 {
3086@@ -396,8 +394,7 @@ setup_return(struct pt_regs *regs, struct ksignal *ksig,
3087 * except when the MPU has protected the vectors
3088 * page from PL0
3089 */
3090- retcode = mm->context.sigpage + signal_return_offset +
3091- (idx << 2) + thumb;
3092+ retcode = mm->context.sigpage + (idx << 2) + thumb;
3093 } else
3094 #endif
3095 {
3096@@ -603,33 +600,3 @@ do_work_pending(struct pt_regs *regs, unsigned int thread_flags, int syscall)
3097 } while (thread_flags & _TIF_WORK_MASK);
3098 return 0;
3099 }
3100-
3101-struct page *get_signal_page(void)
3102-{
3103- unsigned long ptr;
3104- unsigned offset;
3105- struct page *page;
3106- void *addr;
3107-
3108- page = alloc_pages(GFP_KERNEL, 0);
3109-
3110- if (!page)
3111- return NULL;
3112-
3113- addr = page_address(page);
3114-
3115- /* Give the signal return code some randomness */
3116- offset = 0x200 + (get_random_int() & 0x7fc);
3117- signal_return_offset = offset;
3118-
3119- /*
3120- * Copy signal return handlers into the vector page, and
3121- * set sigreturn to be a pointer to these.
3122- */
3123- memcpy(addr + offset, sigreturn_codes, sizeof(sigreturn_codes));
3124-
3125- ptr = (unsigned long)addr + offset;
3126- flush_icache_range(ptr, ptr + sizeof(sigreturn_codes));
3127-
3128- return page;
3129-}
3130diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
3131index 86ef244..c518451 100644
3132--- a/arch/arm/kernel/smp.c
3133+++ b/arch/arm/kernel/smp.c
3134@@ -76,7 +76,7 @@ enum ipi_msg_type {
3135
3136 static DECLARE_COMPLETION(cpu_running);
3137
3138-static struct smp_operations smp_ops;
3139+static struct smp_operations smp_ops __read_only;
3140
3141 void __init smp_set_ops(struct smp_operations *ops)
3142 {
3143diff --git a/arch/arm/kernel/tcm.c b/arch/arm/kernel/tcm.c
3144index 7a3be1d..b00c7de 100644
3145--- a/arch/arm/kernel/tcm.c
3146+++ b/arch/arm/kernel/tcm.c
3147@@ -61,7 +61,7 @@ static struct map_desc itcm_iomap[] __initdata = {
3148 .virtual = ITCM_OFFSET,
3149 .pfn = __phys_to_pfn(ITCM_OFFSET),
3150 .length = 0,
3151- .type = MT_MEMORY_RWX_ITCM,
3152+ .type = MT_MEMORY_RX_ITCM,
3153 }
3154 };
3155
3156@@ -267,7 +267,9 @@ no_dtcm:
3157 start = &__sitcm_text;
3158 end = &__eitcm_text;
3159 ram = &__itcm_start;
3160+ pax_open_kernel();
3161 memcpy(start, ram, itcm_code_sz);
3162+ pax_close_kernel();
3163 pr_debug("CPU ITCM: copied code from %p - %p\n",
3164 start, end);
3165 itcm_present = true;
3166diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
3167index 788e23f..6fa06a1 100644
3168--- a/arch/arm/kernel/traps.c
3169+++ b/arch/arm/kernel/traps.c
3170@@ -65,7 +65,7 @@ static void dump_mem(const char *, const char *, unsigned long, unsigned long);
3171 void dump_backtrace_entry(unsigned long where, unsigned long from, unsigned long frame)
3172 {
3173 #ifdef CONFIG_KALLSYMS
3174- printk("[<%08lx>] (%ps) from [<%08lx>] (%pS)\n", where, (void *)where, from, (void *)from);
3175+ printk("[<%08lx>] (%pA) from [<%08lx>] (%pA)\n", where, (void *)where, from, (void *)from);
3176 #else
3177 printk("Function entered at [<%08lx>] from [<%08lx>]\n", where, from);
3178 #endif
3179@@ -267,6 +267,8 @@ static arch_spinlock_t die_lock = __ARCH_SPIN_LOCK_UNLOCKED;
3180 static int die_owner = -1;
3181 static unsigned int die_nest_count;
3182
3183+extern void gr_handle_kernel_exploit(void);
3184+
3185 static unsigned long oops_begin(void)
3186 {
3187 int cpu;
3188@@ -309,6 +311,9 @@ static void oops_end(unsigned long flags, struct pt_regs *regs, int signr)
3189 panic("Fatal exception in interrupt");
3190 if (panic_on_oops)
3191 panic("Fatal exception");
3192+
3193+ gr_handle_kernel_exploit();
3194+
3195 if (signr)
3196 do_exit(signr);
3197 }
3198@@ -880,7 +885,11 @@ void __init early_trap_init(void *vectors_base)
3199 kuser_init(vectors_base);
3200
3201 flush_icache_range(vectors, vectors + PAGE_SIZE * 2);
3202- modify_domain(DOMAIN_USER, DOMAIN_CLIENT);
3203+
3204+#ifndef CONFIG_PAX_MEMORY_UDEREF
3205+ modify_domain(DOMAIN_USER, DOMAIN_USERCLIENT);
3206+#endif
3207+
3208 #else /* ifndef CONFIG_CPU_V7M */
3209 /*
3210 * on V7-M there is no need to copy the vector table to a dedicated
3211diff --git a/arch/arm/kernel/vmlinux.lds.S b/arch/arm/kernel/vmlinux.lds.S
3212index b31aa73..cc4b7a1 100644
3213--- a/arch/arm/kernel/vmlinux.lds.S
3214+++ b/arch/arm/kernel/vmlinux.lds.S
3215@@ -37,7 +37,7 @@
3216 #endif
3217
3218 #if (defined(CONFIG_SMP_ON_UP) && !defined(CONFIG_DEBUG_SPINLOCK)) || \
3219- defined(CONFIG_GENERIC_BUG)
3220+ defined(CONFIG_GENERIC_BUG) || defined(CONFIG_PAX_REFCOUNT)
3221 #define ARM_EXIT_KEEP(x) x
3222 #define ARM_EXIT_DISCARD(x)
3223 #else
3224@@ -123,6 +123,8 @@ SECTIONS
3225 #ifdef CONFIG_DEBUG_RODATA
3226 . = ALIGN(1<<SECTION_SHIFT);
3227 #endif
3228+ _etext = .; /* End of text section */
3229+
3230 RO_DATA(PAGE_SIZE)
3231
3232 . = ALIGN(4);
3233@@ -153,8 +155,6 @@ SECTIONS
3234
3235 NOTES
3236
3237- _etext = .; /* End of text and rodata section */
3238-
3239 #ifndef CONFIG_XIP_KERNEL
3240 # ifdef CONFIG_ARM_KERNMEM_PERMS
3241 . = ALIGN(1<<SECTION_SHIFT);
3242diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c
3243index b652af5..60231ab 100644
3244--- a/arch/arm/kvm/arm.c
3245+++ b/arch/arm/kvm/arm.c
3246@@ -57,7 +57,7 @@ static unsigned long hyp_default_vectors;
3247 static DEFINE_PER_CPU(struct kvm_vcpu *, kvm_arm_running_vcpu);
3248
3249 /* The VMID used in the VTTBR */
3250-static atomic64_t kvm_vmid_gen = ATOMIC64_INIT(1);
3251+static atomic64_unchecked_t kvm_vmid_gen = ATOMIC64_INIT(1);
3252 static u8 kvm_next_vmid;
3253 static DEFINE_SPINLOCK(kvm_vmid_lock);
3254
3255@@ -358,7 +358,7 @@ void force_vm_exit(const cpumask_t *mask)
3256 */
3257 static bool need_new_vmid_gen(struct kvm *kvm)
3258 {
3259- return unlikely(kvm->arch.vmid_gen != atomic64_read(&kvm_vmid_gen));
3260+ return unlikely(kvm->arch.vmid_gen != atomic64_read_unchecked(&kvm_vmid_gen));
3261 }
3262
3263 /**
3264@@ -391,7 +391,7 @@ static void update_vttbr(struct kvm *kvm)
3265
3266 /* First user of a new VMID generation? */
3267 if (unlikely(kvm_next_vmid == 0)) {
3268- atomic64_inc(&kvm_vmid_gen);
3269+ atomic64_inc_unchecked(&kvm_vmid_gen);
3270 kvm_next_vmid = 1;
3271
3272 /*
3273@@ -408,7 +408,7 @@ static void update_vttbr(struct kvm *kvm)
3274 kvm_call_hyp(__kvm_flush_vm_context);
3275 }
3276
3277- kvm->arch.vmid_gen = atomic64_read(&kvm_vmid_gen);
3278+ kvm->arch.vmid_gen = atomic64_read_unchecked(&kvm_vmid_gen);
3279 kvm->arch.vmid = kvm_next_vmid;
3280 kvm_next_vmid++;
3281
3282@@ -1087,7 +1087,7 @@ struct kvm_vcpu *kvm_mpidr_to_vcpu(struct kvm *kvm, unsigned long mpidr)
3283 /**
3284 * Initialize Hyp-mode and memory mappings on all CPUs.
3285 */
3286-int kvm_arch_init(void *opaque)
3287+int kvm_arch_init(const void *opaque)
3288 {
3289 int err;
3290 int ret, cpu;
3291diff --git a/arch/arm/lib/clear_user.S b/arch/arm/lib/clear_user.S
3292index 14a0d98..7771a7d 100644
3293--- a/arch/arm/lib/clear_user.S
3294+++ b/arch/arm/lib/clear_user.S
3295@@ -12,14 +12,14 @@
3296
3297 .text
3298
3299-/* Prototype: int __clear_user(void *addr, size_t sz)
3300+/* Prototype: int ___clear_user(void *addr, size_t sz)
3301 * Purpose : clear some user memory
3302 * Params : addr - user memory address to clear
3303 * : sz - number of bytes to clear
3304 * Returns : number of bytes NOT cleared
3305 */
3306 ENTRY(__clear_user_std)
3307-WEAK(__clear_user)
3308+WEAK(___clear_user)
3309 stmfd sp!, {r1, lr}
3310 mov r2, #0
3311 cmp r1, #4
3312@@ -44,7 +44,7 @@ WEAK(__clear_user)
3313 USER( strnebt r2, [r0])
3314 mov r0, #0
3315 ldmfd sp!, {r1, pc}
3316-ENDPROC(__clear_user)
3317+ENDPROC(___clear_user)
3318 ENDPROC(__clear_user_std)
3319
3320 .pushsection .fixup,"ax"
3321diff --git a/arch/arm/lib/copy_from_user.S b/arch/arm/lib/copy_from_user.S
3322index 7a235b9..73a0556 100644
3323--- a/arch/arm/lib/copy_from_user.S
3324+++ b/arch/arm/lib/copy_from_user.S
3325@@ -17,7 +17,7 @@
3326 /*
3327 * Prototype:
3328 *
3329- * size_t __copy_from_user(void *to, const void *from, size_t n)
3330+ * size_t ___copy_from_user(void *to, const void *from, size_t n)
3331 *
3332 * Purpose:
3333 *
3334@@ -89,11 +89,11 @@
3335
3336 .text
3337
3338-ENTRY(__copy_from_user)
3339+ENTRY(___copy_from_user)
3340
3341 #include "copy_template.S"
3342
3343-ENDPROC(__copy_from_user)
3344+ENDPROC(___copy_from_user)
3345
3346 .pushsection .fixup,"ax"
3347 .align 0
3348diff --git a/arch/arm/lib/copy_page.S b/arch/arm/lib/copy_page.S
3349index 6ee2f67..d1cce76 100644
3350--- a/arch/arm/lib/copy_page.S
3351+++ b/arch/arm/lib/copy_page.S
3352@@ -10,6 +10,7 @@
3353 * ASM optimised string functions
3354 */
3355 #include <linux/linkage.h>
3356+#include <linux/const.h>
3357 #include <asm/assembler.h>
3358 #include <asm/asm-offsets.h>
3359 #include <asm/cache.h>
3360diff --git a/arch/arm/lib/copy_to_user.S b/arch/arm/lib/copy_to_user.S
3361index a9d3db1..164b089 100644
3362--- a/arch/arm/lib/copy_to_user.S
3363+++ b/arch/arm/lib/copy_to_user.S
3364@@ -17,7 +17,7 @@
3365 /*
3366 * Prototype:
3367 *
3368- * size_t __copy_to_user(void *to, const void *from, size_t n)
3369+ * size_t ___copy_to_user(void *to, const void *from, size_t n)
3370 *
3371 * Purpose:
3372 *
3373@@ -93,11 +93,11 @@
3374 .text
3375
3376 ENTRY(__copy_to_user_std)
3377-WEAK(__copy_to_user)
3378+WEAK(___copy_to_user)
3379
3380 #include "copy_template.S"
3381
3382-ENDPROC(__copy_to_user)
3383+ENDPROC(___copy_to_user)
3384 ENDPROC(__copy_to_user_std)
3385
3386 .pushsection .fixup,"ax"
3387diff --git a/arch/arm/lib/csumpartialcopyuser.S b/arch/arm/lib/csumpartialcopyuser.S
3388index 7d08b43..f7ca7ea 100644
3389--- a/arch/arm/lib/csumpartialcopyuser.S
3390+++ b/arch/arm/lib/csumpartialcopyuser.S
3391@@ -57,8 +57,8 @@
3392 * Returns : r0 = checksum, [[sp, #0], #0] = 0 or -EFAULT
3393 */
3394
3395-#define FN_ENTRY ENTRY(csum_partial_copy_from_user)
3396-#define FN_EXIT ENDPROC(csum_partial_copy_from_user)
3397+#define FN_ENTRY ENTRY(__csum_partial_copy_from_user)
3398+#define FN_EXIT ENDPROC(__csum_partial_copy_from_user)
3399
3400 #include "csumpartialcopygeneric.S"
3401
3402diff --git a/arch/arm/lib/delay.c b/arch/arm/lib/delay.c
3403index 312d43e..21d2322 100644
3404--- a/arch/arm/lib/delay.c
3405+++ b/arch/arm/lib/delay.c
3406@@ -29,7 +29,7 @@
3407 /*
3408 * Default to the loop-based delay implementation.
3409 */
3410-struct arm_delay_ops arm_delay_ops = {
3411+struct arm_delay_ops arm_delay_ops __read_only = {
3412 .delay = __loop_delay,
3413 .const_udelay = __loop_const_udelay,
3414 .udelay = __loop_udelay,
3415diff --git a/arch/arm/lib/uaccess_with_memcpy.c b/arch/arm/lib/uaccess_with_memcpy.c
3416index 3e58d71..029817c 100644
3417--- a/arch/arm/lib/uaccess_with_memcpy.c
3418+++ b/arch/arm/lib/uaccess_with_memcpy.c
3419@@ -136,7 +136,7 @@ out:
3420 }
3421
3422 unsigned long
3423-__copy_to_user(void __user *to, const void *from, unsigned long n)
3424+___copy_to_user(void __user *to, const void *from, unsigned long n)
3425 {
3426 /*
3427 * This test is stubbed out of the main function above to keep
3428@@ -190,7 +190,7 @@ out:
3429 return n;
3430 }
3431
3432-unsigned long __clear_user(void __user *addr, unsigned long n)
3433+unsigned long ___clear_user(void __user *addr, unsigned long n)
3434 {
3435 /* See rational for this in __copy_to_user() above. */
3436 if (n < 64)
3437diff --git a/arch/arm/mach-exynos/suspend.c b/arch/arm/mach-exynos/suspend.c
3438index 318d127..9aab0d1 100644
3439--- a/arch/arm/mach-exynos/suspend.c
3440+++ b/arch/arm/mach-exynos/suspend.c
3441@@ -18,6 +18,7 @@
3442 #include <linux/syscore_ops.h>
3443 #include <linux/cpu_pm.h>
3444 #include <linux/io.h>
3445+#include <linux/irq.h>
3446 #include <linux/irqchip/arm-gic.h>
3447 #include <linux/err.h>
3448 #include <linux/regulator/machine.h>
3449@@ -632,8 +633,10 @@ void __init exynos_pm_init(void)
3450 tmp |= pm_data->wake_disable_mask;
3451 pmu_raw_writel(tmp, S5P_WAKEUP_MASK);
3452
3453- exynos_pm_syscore_ops.suspend = pm_data->pm_suspend;
3454- exynos_pm_syscore_ops.resume = pm_data->pm_resume;
3455+ pax_open_kernel();
3456+ *(void **)&exynos_pm_syscore_ops.suspend = pm_data->pm_suspend;
3457+ *(void **)&exynos_pm_syscore_ops.resume = pm_data->pm_resume;
3458+ pax_close_kernel();
3459
3460 register_syscore_ops(&exynos_pm_syscore_ops);
3461 suspend_set_ops(&exynos_suspend_ops);
3462diff --git a/arch/arm/mach-keystone/keystone.c b/arch/arm/mach-keystone/keystone.c
3463index 0662087..004d163 100644
3464--- a/arch/arm/mach-keystone/keystone.c
3465+++ b/arch/arm/mach-keystone/keystone.c
3466@@ -27,7 +27,7 @@
3467
3468 #include "keystone.h"
3469
3470-static struct notifier_block platform_nb;
3471+static notifier_block_no_const platform_nb;
3472 static unsigned long keystone_dma_pfn_offset __read_mostly;
3473
3474 static int keystone_platform_notifier(struct notifier_block *nb,
3475diff --git a/arch/arm/mach-mvebu/coherency.c b/arch/arm/mach-mvebu/coherency.c
3476index e46e9ea..9141c83 100644
3477--- a/arch/arm/mach-mvebu/coherency.c
3478+++ b/arch/arm/mach-mvebu/coherency.c
3479@@ -117,7 +117,7 @@ static void __init armada_370_coherency_init(struct device_node *np)
3480
3481 /*
3482 * This ioremap hook is used on Armada 375/38x to ensure that PCIe
3483- * memory areas are mapped as MT_UNCACHED instead of MT_DEVICE. This
3484+ * memory areas are mapped as MT_UNCACHED_RW instead of MT_DEVICE. This
3485 * is needed as a workaround for a deadlock issue between the PCIe
3486 * interface and the cache controller.
3487 */
3488@@ -130,7 +130,7 @@ armada_pcie_wa_ioremap_caller(phys_addr_t phys_addr, size_t size,
3489 mvebu_mbus_get_pcie_mem_aperture(&pcie_mem);
3490
3491 if (pcie_mem.start <= phys_addr && (phys_addr + size) <= pcie_mem.end)
3492- mtype = MT_UNCACHED;
3493+ mtype = MT_UNCACHED_RW;
3494
3495 return __arm_ioremap_caller(phys_addr, size, mtype, caller);
3496 }
3497diff --git a/arch/arm/mach-omap2/board-n8x0.c b/arch/arm/mach-omap2/board-n8x0.c
3498index b6443a4..20a0b74 100644
3499--- a/arch/arm/mach-omap2/board-n8x0.c
3500+++ b/arch/arm/mach-omap2/board-n8x0.c
3501@@ -569,7 +569,7 @@ static int n8x0_menelaus_late_init(struct device *dev)
3502 }
3503 #endif
3504
3505-struct menelaus_platform_data n8x0_menelaus_platform_data __initdata = {
3506+struct menelaus_platform_data n8x0_menelaus_platform_data __initconst = {
3507 .late_init = n8x0_menelaus_late_init,
3508 };
3509
3510diff --git a/arch/arm/mach-omap2/omap-mpuss-lowpower.c b/arch/arm/mach-omap2/omap-mpuss-lowpower.c
3511index 79f49d9..70bf184 100644
3512--- a/arch/arm/mach-omap2/omap-mpuss-lowpower.c
3513+++ b/arch/arm/mach-omap2/omap-mpuss-lowpower.c
3514@@ -86,7 +86,7 @@ struct cpu_pm_ops {
3515 void (*resume)(void);
3516 void (*scu_prepare)(unsigned int cpu_id, unsigned int cpu_state);
3517 void (*hotplug_restart)(void);
3518-};
3519+} __no_const;
3520
3521 static DEFINE_PER_CPU(struct omap4_cpu_pm_info, omap4_pm_info);
3522 static struct powerdomain *mpuss_pd;
3523@@ -105,7 +105,7 @@ static void dummy_cpu_resume(void)
3524 static void dummy_scu_prepare(unsigned int cpu_id, unsigned int cpu_state)
3525 {}
3526
3527-struct cpu_pm_ops omap_pm_ops = {
3528+static struct cpu_pm_ops omap_pm_ops __read_only = {
3529 .finish_suspend = default_finish_suspend,
3530 .resume = dummy_cpu_resume,
3531 .scu_prepare = dummy_scu_prepare,
3532diff --git a/arch/arm/mach-omap2/omap-smp.c b/arch/arm/mach-omap2/omap-smp.c
3533index 5305ec7..6d74045 100644
3534--- a/arch/arm/mach-omap2/omap-smp.c
3535+++ b/arch/arm/mach-omap2/omap-smp.c
3536@@ -19,6 +19,7 @@
3537 #include <linux/device.h>
3538 #include <linux/smp.h>
3539 #include <linux/io.h>
3540+#include <linux/irq.h>
3541 #include <linux/irqchip/arm-gic.h>
3542
3543 #include <asm/smp_scu.h>
3544diff --git a/arch/arm/mach-omap2/omap-wakeupgen.c b/arch/arm/mach-omap2/omap-wakeupgen.c
3545index f961c46..4a453dc 100644
3546--- a/arch/arm/mach-omap2/omap-wakeupgen.c
3547+++ b/arch/arm/mach-omap2/omap-wakeupgen.c
3548@@ -344,7 +344,7 @@ static int irq_cpu_hotplug_notify(struct notifier_block *self,
3549 return NOTIFY_OK;
3550 }
3551
3552-static struct notifier_block __refdata irq_hotplug_notifier = {
3553+static struct notifier_block irq_hotplug_notifier = {
3554 .notifier_call = irq_cpu_hotplug_notify,
3555 };
3556
3557diff --git a/arch/arm/mach-omap2/omap_device.c b/arch/arm/mach-omap2/omap_device.c
3558index be9541e..821805f 100644
3559--- a/arch/arm/mach-omap2/omap_device.c
3560+++ b/arch/arm/mach-omap2/omap_device.c
3561@@ -510,7 +510,7 @@ void omap_device_delete(struct omap_device *od)
3562 struct platform_device __init *omap_device_build(const char *pdev_name,
3563 int pdev_id,
3564 struct omap_hwmod *oh,
3565- void *pdata, int pdata_len)
3566+ const void *pdata, int pdata_len)
3567 {
3568 struct omap_hwmod *ohs[] = { oh };
3569
3570@@ -538,7 +538,7 @@ struct platform_device __init *omap_device_build(const char *pdev_name,
3571 struct platform_device __init *omap_device_build_ss(const char *pdev_name,
3572 int pdev_id,
3573 struct omap_hwmod **ohs,
3574- int oh_cnt, void *pdata,
3575+ int oh_cnt, const void *pdata,
3576 int pdata_len)
3577 {
3578 int ret = -ENOMEM;
3579diff --git a/arch/arm/mach-omap2/omap_device.h b/arch/arm/mach-omap2/omap_device.h
3580index 78c02b3..c94109a 100644
3581--- a/arch/arm/mach-omap2/omap_device.h
3582+++ b/arch/arm/mach-omap2/omap_device.h
3583@@ -72,12 +72,12 @@ int omap_device_idle(struct platform_device *pdev);
3584 /* Core code interface */
3585
3586 struct platform_device *omap_device_build(const char *pdev_name, int pdev_id,
3587- struct omap_hwmod *oh, void *pdata,
3588+ struct omap_hwmod *oh, const void *pdata,
3589 int pdata_len);
3590
3591 struct platform_device *omap_device_build_ss(const char *pdev_name, int pdev_id,
3592 struct omap_hwmod **oh, int oh_cnt,
3593- void *pdata, int pdata_len);
3594+ const void *pdata, int pdata_len);
3595
3596 struct omap_device *omap_device_alloc(struct platform_device *pdev,
3597 struct omap_hwmod **ohs, int oh_cnt);
3598diff --git a/arch/arm/mach-omap2/omap_hwmod.c b/arch/arm/mach-omap2/omap_hwmod.c
3599index 355b089..2c9d7c3 100644
3600--- a/arch/arm/mach-omap2/omap_hwmod.c
3601+++ b/arch/arm/mach-omap2/omap_hwmod.c
3602@@ -193,10 +193,10 @@ struct omap_hwmod_soc_ops {
3603 int (*init_clkdm)(struct omap_hwmod *oh);
3604 void (*update_context_lost)(struct omap_hwmod *oh);
3605 int (*get_context_lost)(struct omap_hwmod *oh);
3606-};
3607+} __no_const;
3608
3609 /* soc_ops: adapts the omap_hwmod code to the currently-booted SoC */
3610-static struct omap_hwmod_soc_ops soc_ops;
3611+static struct omap_hwmod_soc_ops soc_ops __read_only;
3612
3613 /* omap_hwmod_list contains all registered struct omap_hwmods */
3614 static LIST_HEAD(omap_hwmod_list);
3615diff --git a/arch/arm/mach-omap2/powerdomains43xx_data.c b/arch/arm/mach-omap2/powerdomains43xx_data.c
3616index 95fee54..cfa9cf1 100644
3617--- a/arch/arm/mach-omap2/powerdomains43xx_data.c
3618+++ b/arch/arm/mach-omap2/powerdomains43xx_data.c
3619@@ -10,6 +10,7 @@
3620
3621 #include <linux/kernel.h>
3622 #include <linux/init.h>
3623+#include <asm/pgtable.h>
3624
3625 #include "powerdomain.h"
3626
3627@@ -129,7 +130,9 @@ static int am43xx_check_vcvp(void)
3628
3629 void __init am43xx_powerdomains_init(void)
3630 {
3631- omap4_pwrdm_operations.pwrdm_has_voltdm = am43xx_check_vcvp;
3632+ pax_open_kernel();
3633+ *(void **)&omap4_pwrdm_operations.pwrdm_has_voltdm = am43xx_check_vcvp;
3634+ pax_close_kernel();
3635 pwrdm_register_platform_funcs(&omap4_pwrdm_operations);
3636 pwrdm_register_pwrdms(powerdomains_am43xx);
3637 pwrdm_complete_init();
3638diff --git a/arch/arm/mach-omap2/wd_timer.c b/arch/arm/mach-omap2/wd_timer.c
3639index ff0a68c..b312aa0 100644
3640--- a/arch/arm/mach-omap2/wd_timer.c
3641+++ b/arch/arm/mach-omap2/wd_timer.c
3642@@ -110,7 +110,9 @@ static int __init omap_init_wdt(void)
3643 struct omap_hwmod *oh;
3644 char *oh_name = "wd_timer2";
3645 char *dev_name = "omap_wdt";
3646- struct omap_wd_timer_platform_data pdata;
3647+ static struct omap_wd_timer_platform_data pdata = {
3648+ .read_reset_sources = prm_read_reset_sources
3649+ };
3650
3651 if (!cpu_class_is_omap2() || of_have_populated_dt())
3652 return 0;
3653@@ -121,8 +123,6 @@ static int __init omap_init_wdt(void)
3654 return -EINVAL;
3655 }
3656
3657- pdata.read_reset_sources = prm_read_reset_sources;
3658-
3659 pdev = omap_device_build(dev_name, id, oh, &pdata,
3660 sizeof(struct omap_wd_timer_platform_data));
3661 WARN(IS_ERR(pdev), "Can't build omap_device for %s:%s.\n",
3662diff --git a/arch/arm/mach-tegra/cpuidle-tegra20.c b/arch/arm/mach-tegra/cpuidle-tegra20.c
3663index 4f25a7c..a81be85 100644
3664--- a/arch/arm/mach-tegra/cpuidle-tegra20.c
3665+++ b/arch/arm/mach-tegra/cpuidle-tegra20.c
3666@@ -179,7 +179,7 @@ static int tegra20_idle_lp2_coupled(struct cpuidle_device *dev,
3667 bool entered_lp2 = false;
3668
3669 if (tegra_pending_sgi())
3670- ACCESS_ONCE(abort_flag) = true;
3671+ ACCESS_ONCE_RW(abort_flag) = true;
3672
3673 cpuidle_coupled_parallel_barrier(dev, &abort_barrier);
3674
3675diff --git a/arch/arm/mach-tegra/irq.c b/arch/arm/mach-tegra/irq.c
3676index ab95f53..4b977a7 100644
3677--- a/arch/arm/mach-tegra/irq.c
3678+++ b/arch/arm/mach-tegra/irq.c
3679@@ -20,6 +20,7 @@
3680 #include <linux/cpu_pm.h>
3681 #include <linux/interrupt.h>
3682 #include <linux/io.h>
3683+#include <linux/irq.h>
3684 #include <linux/irqchip/arm-gic.h>
3685 #include <linux/irq.h>
3686 #include <linux/kernel.h>
3687diff --git a/arch/arm/mach-ux500/pm.c b/arch/arm/mach-ux500/pm.c
3688index 2cb587b..6ddfebf 100644
3689--- a/arch/arm/mach-ux500/pm.c
3690+++ b/arch/arm/mach-ux500/pm.c
3691@@ -10,6 +10,7 @@
3692 */
3693
3694 #include <linux/kernel.h>
3695+#include <linux/irq.h>
3696 #include <linux/irqchip/arm-gic.h>
3697 #include <linux/delay.h>
3698 #include <linux/io.h>
3699diff --git a/arch/arm/mach-ux500/setup.h b/arch/arm/mach-ux500/setup.h
3700index 2dea8b5..6499da2 100644
3701--- a/arch/arm/mach-ux500/setup.h
3702+++ b/arch/arm/mach-ux500/setup.h
3703@@ -33,13 +33,6 @@ extern void ux500_timer_init(void);
3704 .type = MT_DEVICE, \
3705 }
3706
3707-#define __MEM_DEV_DESC(x, sz) { \
3708- .virtual = IO_ADDRESS(x), \
3709- .pfn = __phys_to_pfn(x), \
3710- .length = sz, \
3711- .type = MT_MEMORY_RWX, \
3712-}
3713-
3714 extern struct smp_operations ux500_smp_ops;
3715 extern void ux500_cpu_die(unsigned int cpu);
3716
3717diff --git a/arch/arm/mach-zynq/platsmp.c b/arch/arm/mach-zynq/platsmp.c
3718index 52d768f..5f93180 100644
3719--- a/arch/arm/mach-zynq/platsmp.c
3720+++ b/arch/arm/mach-zynq/platsmp.c
3721@@ -24,6 +24,7 @@
3722 #include <linux/io.h>
3723 #include <asm/cacheflush.h>
3724 #include <asm/smp_scu.h>
3725+#include <linux/irq.h>
3726 #include <linux/irqchip/arm-gic.h>
3727 #include "common.h"
3728
3729diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
3730index 9b4f29e..bbf3bfa 100644
3731--- a/arch/arm/mm/Kconfig
3732+++ b/arch/arm/mm/Kconfig
3733@@ -446,6 +446,7 @@ config CPU_32v5
3734
3735 config CPU_32v6
3736 bool
3737+ select CPU_USE_DOMAINS if CPU_V6 && MMU && !PAX_KERNEXEC && !PAX_MEMORY_UDEREF
3738 select TLS_REG_EMUL if !CPU_32v6K && !MMU
3739
3740 config CPU_32v6K
3741@@ -600,6 +601,7 @@ config CPU_CP15_MPU
3742
3743 config CPU_USE_DOMAINS
3744 bool
3745+ depends on !ARM_LPAE && !PAX_KERNEXEC && !PAX_MEMORY_UDEREF
3746 help
3747 This option enables or disables the use of domain switching
3748 via the set_fs() function.
3749@@ -798,7 +800,7 @@ config NEED_KUSER_HELPERS
3750
3751 config KUSER_HELPERS
3752 bool "Enable kuser helpers in vector page" if !NEED_KUSER_HELPERS
3753- depends on MMU
3754+ depends on MMU && (!(CPU_V6 || CPU_V6K || CPU_V7) || GRKERNSEC_OLD_ARM_USERLAND)
3755 default y
3756 help
3757 Warning: disabling this option may break user programs.
3758@@ -812,7 +814,7 @@ config KUSER_HELPERS
3759 See Documentation/arm/kernel_user_helpers.txt for details.
3760
3761 However, the fixed address nature of these helpers can be used
3762- by ROP (return orientated programming) authors when creating
3763+ by ROP (Return Oriented Programming) authors when creating
3764 exploits.
3765
3766 If all of the binaries and libraries which run on your platform
3767diff --git a/arch/arm/mm/alignment.c b/arch/arm/mm/alignment.c
3768index 2c0c541..4585df9 100644
3769--- a/arch/arm/mm/alignment.c
3770+++ b/arch/arm/mm/alignment.c
3771@@ -216,10 +216,12 @@ union offset_union {
3772 #define __get16_unaligned_check(ins,val,addr) \
3773 do { \
3774 unsigned int err = 0, v, a = addr; \
3775+ pax_open_userland(); \
3776 __get8_unaligned_check(ins,v,a,err); \
3777 val = v << ((BE) ? 8 : 0); \
3778 __get8_unaligned_check(ins,v,a,err); \
3779 val |= v << ((BE) ? 0 : 8); \
3780+ pax_close_userland(); \
3781 if (err) \
3782 goto fault; \
3783 } while (0)
3784@@ -233,6 +235,7 @@ union offset_union {
3785 #define __get32_unaligned_check(ins,val,addr) \
3786 do { \
3787 unsigned int err = 0, v, a = addr; \
3788+ pax_open_userland(); \
3789 __get8_unaligned_check(ins,v,a,err); \
3790 val = v << ((BE) ? 24 : 0); \
3791 __get8_unaligned_check(ins,v,a,err); \
3792@@ -241,6 +244,7 @@ union offset_union {
3793 val |= v << ((BE) ? 8 : 16); \
3794 __get8_unaligned_check(ins,v,a,err); \
3795 val |= v << ((BE) ? 0 : 24); \
3796+ pax_close_userland(); \
3797 if (err) \
3798 goto fault; \
3799 } while (0)
3800@@ -254,6 +258,7 @@ union offset_union {
3801 #define __put16_unaligned_check(ins,val,addr) \
3802 do { \
3803 unsigned int err = 0, v = val, a = addr; \
3804+ pax_open_userland(); \
3805 __asm__( FIRST_BYTE_16 \
3806 ARM( "1: "ins" %1, [%2], #1\n" ) \
3807 THUMB( "1: "ins" %1, [%2]\n" ) \
3808@@ -273,6 +278,7 @@ union offset_union {
3809 " .popsection\n" \
3810 : "=r" (err), "=&r" (v), "=&r" (a) \
3811 : "0" (err), "1" (v), "2" (a)); \
3812+ pax_close_userland(); \
3813 if (err) \
3814 goto fault; \
3815 } while (0)
3816@@ -286,6 +292,7 @@ union offset_union {
3817 #define __put32_unaligned_check(ins,val,addr) \
3818 do { \
3819 unsigned int err = 0, v = val, a = addr; \
3820+ pax_open_userland(); \
3821 __asm__( FIRST_BYTE_32 \
3822 ARM( "1: "ins" %1, [%2], #1\n" ) \
3823 THUMB( "1: "ins" %1, [%2]\n" ) \
3824@@ -315,6 +322,7 @@ union offset_union {
3825 " .popsection\n" \
3826 : "=r" (err), "=&r" (v), "=&r" (a) \
3827 : "0" (err), "1" (v), "2" (a)); \
3828+ pax_close_userland(); \
3829 if (err) \
3830 goto fault; \
3831 } while (0)
3832diff --git a/arch/arm/mm/cache-l2x0.c b/arch/arm/mm/cache-l2x0.c
3833index 8f15f70..d599a2b 100644
3834--- a/arch/arm/mm/cache-l2x0.c
3835+++ b/arch/arm/mm/cache-l2x0.c
3836@@ -43,7 +43,7 @@ struct l2c_init_data {
3837 void (*save)(void __iomem *);
3838 void (*configure)(void __iomem *);
3839 struct outer_cache_fns outer_cache;
3840-};
3841+} __do_const;
3842
3843 #define CACHE_LINE_SIZE 32
3844
3845diff --git a/arch/arm/mm/context.c b/arch/arm/mm/context.c
3846index 845769e..4278fd7 100644
3847--- a/arch/arm/mm/context.c
3848+++ b/arch/arm/mm/context.c
3849@@ -43,7 +43,7 @@
3850 #define NUM_USER_ASIDS ASID_FIRST_VERSION
3851
3852 static DEFINE_RAW_SPINLOCK(cpu_asid_lock);
3853-static atomic64_t asid_generation = ATOMIC64_INIT(ASID_FIRST_VERSION);
3854+static atomic64_unchecked_t asid_generation = ATOMIC64_INIT(ASID_FIRST_VERSION);
3855 static DECLARE_BITMAP(asid_map, NUM_USER_ASIDS);
3856
3857 static DEFINE_PER_CPU(atomic64_t, active_asids);
3858@@ -178,7 +178,7 @@ static u64 new_context(struct mm_struct *mm, unsigned int cpu)
3859 {
3860 static u32 cur_idx = 1;
3861 u64 asid = atomic64_read(&mm->context.id);
3862- u64 generation = atomic64_read(&asid_generation);
3863+ u64 generation = atomic64_read_unchecked(&asid_generation);
3864
3865 if (asid != 0) {
3866 /*
3867@@ -208,7 +208,7 @@ static u64 new_context(struct mm_struct *mm, unsigned int cpu)
3868 */
3869 asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, cur_idx);
3870 if (asid == NUM_USER_ASIDS) {
3871- generation = atomic64_add_return(ASID_FIRST_VERSION,
3872+ generation = atomic64_add_return_unchecked(ASID_FIRST_VERSION,
3873 &asid_generation);
3874 flush_context(cpu);
3875 asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, 1);
3876@@ -240,14 +240,14 @@ void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk)
3877 cpu_set_reserved_ttbr0();
3878
3879 asid = atomic64_read(&mm->context.id);
3880- if (!((asid ^ atomic64_read(&asid_generation)) >> ASID_BITS)
3881+ if (!((asid ^ atomic64_read_unchecked(&asid_generation)) >> ASID_BITS)
3882 && atomic64_xchg(&per_cpu(active_asids, cpu), asid))
3883 goto switch_mm_fastpath;
3884
3885 raw_spin_lock_irqsave(&cpu_asid_lock, flags);
3886 /* Check that our ASID belongs to the current generation. */
3887 asid = atomic64_read(&mm->context.id);
3888- if ((asid ^ atomic64_read(&asid_generation)) >> ASID_BITS) {
3889+ if ((asid ^ atomic64_read_unchecked(&asid_generation)) >> ASID_BITS) {
3890 asid = new_context(mm, cpu);
3891 atomic64_set(&mm->context.id, asid);
3892 }
3893diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
3894index 6333d9c..fd09b46 100644
3895--- a/arch/arm/mm/fault.c
3896+++ b/arch/arm/mm/fault.c
3897@@ -25,6 +25,7 @@
3898 #include <asm/system_misc.h>
3899 #include <asm/system_info.h>
3900 #include <asm/tlbflush.h>
3901+#include <asm/sections.h>
3902
3903 #include "fault.h"
3904
3905@@ -138,6 +139,31 @@ __do_kernel_fault(struct mm_struct *mm, unsigned long addr, unsigned int fsr,
3906 if (fixup_exception(regs))
3907 return;
3908
3909+#ifdef CONFIG_PAX_MEMORY_UDEREF
3910+ if (addr < TASK_SIZE) {
3911+ if (current->signal->curr_ip)
3912+ printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to access userland memory at %08lx\n", &current->signal->curr_ip, current->comm, task_pid_nr(current),
3913+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()), addr);
3914+ else
3915+ printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to access userland memory at %08lx\n", current->comm, task_pid_nr(current),
3916+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()), addr);
3917+ }
3918+#endif
3919+
3920+#ifdef CONFIG_PAX_KERNEXEC
3921+ if ((fsr & FSR_WRITE) &&
3922+ (((unsigned long)_stext <= addr && addr < init_mm.end_code) ||
3923+ (MODULES_VADDR <= addr && addr < MODULES_END)))
3924+ {
3925+ if (current->signal->curr_ip)
3926+ printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n", &current->signal->curr_ip, current->comm, task_pid_nr(current),
3927+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()));
3928+ else
3929+ printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n", current->comm, task_pid_nr(current),
3930+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()));
3931+ }
3932+#endif
3933+
3934 /*
3935 * No handler, we'll have to terminate things with extreme prejudice.
3936 */
3937@@ -173,6 +199,13 @@ __do_user_fault(struct task_struct *tsk, unsigned long addr,
3938 }
3939 #endif
3940
3941+#ifdef CONFIG_PAX_PAGEEXEC
3942+ if (fsr & FSR_LNX_PF) {
3943+ pax_report_fault(regs, (void *)regs->ARM_pc, (void *)regs->ARM_sp);
3944+ do_group_exit(SIGKILL);
3945+ }
3946+#endif
3947+
3948 tsk->thread.address = addr;
3949 tsk->thread.error_code = fsr;
3950 tsk->thread.trap_no = 14;
3951@@ -400,6 +433,33 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
3952 }
3953 #endif /* CONFIG_MMU */
3954
3955+#ifdef CONFIG_PAX_PAGEEXEC
3956+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
3957+{
3958+ long i;
3959+
3960+ printk(KERN_ERR "PAX: bytes at PC: ");
3961+ for (i = 0; i < 20; i++) {
3962+ unsigned char c;
3963+ if (get_user(c, (__force unsigned char __user *)pc+i))
3964+ printk(KERN_CONT "?? ");
3965+ else
3966+ printk(KERN_CONT "%02x ", c);
3967+ }
3968+ printk("\n");
3969+
3970+ printk(KERN_ERR "PAX: bytes at SP-4: ");
3971+ for (i = -1; i < 20; i++) {
3972+ unsigned long c;
3973+ if (get_user(c, (__force unsigned long __user *)sp+i))
3974+ printk(KERN_CONT "???????? ");
3975+ else
3976+ printk(KERN_CONT "%08lx ", c);
3977+ }
3978+ printk("\n");
3979+}
3980+#endif
3981+
3982 /*
3983 * First Level Translation Fault Handler
3984 *
3985@@ -547,9 +607,22 @@ do_DataAbort(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
3986 const struct fsr_info *inf = fsr_info + fsr_fs(fsr);
3987 struct siginfo info;
3988
3989+#ifdef CONFIG_PAX_MEMORY_UDEREF
3990+ if (addr < TASK_SIZE && is_domain_fault(fsr)) {
3991+ if (current->signal->curr_ip)
3992+ printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to access userland memory at %08lx\n", &current->signal->curr_ip, current->comm, task_pid_nr(current),
3993+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()), addr);
3994+ else
3995+ printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to access userland memory at %08lx\n", current->comm, task_pid_nr(current),
3996+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()), addr);
3997+ goto die;
3998+ }
3999+#endif
4000+
4001 if (!inf->fn(addr, fsr & ~FSR_LNX_PF, regs))
4002 return;
4003
4004+die:
4005 pr_alert("Unhandled fault: %s (0x%03x) at 0x%08lx\n",
4006 inf->name, fsr, addr);
4007 show_pte(current->mm, addr);
4008@@ -574,15 +647,104 @@ hook_ifault_code(int nr, int (*fn)(unsigned long, unsigned int, struct pt_regs *
4009 ifsr_info[nr].name = name;
4010 }
4011
4012+asmlinkage int sys_sigreturn(struct pt_regs *regs);
4013+asmlinkage int sys_rt_sigreturn(struct pt_regs *regs);
4014+
4015 asmlinkage void __exception
4016 do_PrefetchAbort(unsigned long addr, unsigned int ifsr, struct pt_regs *regs)
4017 {
4018 const struct fsr_info *inf = ifsr_info + fsr_fs(ifsr);
4019 struct siginfo info;
4020+ unsigned long pc = instruction_pointer(regs);
4021+
4022+ if (user_mode(regs)) {
4023+ unsigned long sigpage = current->mm->context.sigpage;
4024+
4025+ if (sigpage <= pc && pc < sigpage + 7*4) {
4026+ if (pc < sigpage + 3*4)
4027+ sys_sigreturn(regs);
4028+ else
4029+ sys_rt_sigreturn(regs);
4030+ return;
4031+ }
4032+ if (pc == 0xffff0f60UL) {
4033+ /*
4034+ * PaX: __kuser_cmpxchg64 emulation
4035+ */
4036+ // TODO
4037+ //regs->ARM_pc = regs->ARM_lr;
4038+ //return;
4039+ }
4040+ if (pc == 0xffff0fa0UL) {
4041+ /*
4042+ * PaX: __kuser_memory_barrier emulation
4043+ */
4044+ // dmb(); implied by the exception
4045+ regs->ARM_pc = regs->ARM_lr;
4046+ return;
4047+ }
4048+ if (pc == 0xffff0fc0UL) {
4049+ /*
4050+ * PaX: __kuser_cmpxchg emulation
4051+ */
4052+ // TODO
4053+ //long new;
4054+ //int op;
4055+
4056+ //op = FUTEX_OP_SET << 28;
4057+ //new = futex_atomic_op_inuser(op, regs->ARM_r2);
4058+ //regs->ARM_r0 = old != new;
4059+ //regs->ARM_pc = regs->ARM_lr;
4060+ //return;
4061+ }
4062+ if (pc == 0xffff0fe0UL) {
4063+ /*
4064+ * PaX: __kuser_get_tls emulation
4065+ */
4066+ regs->ARM_r0 = current_thread_info()->tp_value[0];
4067+ regs->ARM_pc = regs->ARM_lr;
4068+ return;
4069+ }
4070+ }
4071+
4072+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
4073+ else if (is_domain_fault(ifsr) || is_xn_fault(ifsr)) {
4074+ if (current->signal->curr_ip)
4075+ printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to execute %s memory at %08lx\n", &current->signal->curr_ip, current->comm, task_pid_nr(current),
4076+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()),
4077+ pc >= TASK_SIZE ? "non-executable kernel" : "userland", pc);
4078+ else
4079+ printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to execute %s memory at %08lx\n", current->comm, task_pid_nr(current),
4080+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()),
4081+ pc >= TASK_SIZE ? "non-executable kernel" : "userland", pc);
4082+ goto die;
4083+ }
4084+#endif
4085+
4086+#ifdef CONFIG_PAX_REFCOUNT
4087+ if (fsr_fs(ifsr) == FAULT_CODE_DEBUG) {
4088+#ifdef CONFIG_THUMB2_KERNEL
4089+ unsigned short bkpt;
4090+
4091+ if (!probe_kernel_address(pc, bkpt) && cpu_to_le16(bkpt) == 0xbef1) {
4092+#else
4093+ unsigned int bkpt;
4094+
4095+ if (!probe_kernel_address(pc, bkpt) && cpu_to_le32(bkpt) == 0xe12f1073) {
4096+#endif
4097+ current->thread.error_code = ifsr;
4098+ current->thread.trap_no = 0;
4099+ pax_report_refcount_overflow(regs);
4100+ fixup_exception(regs);
4101+ return;
4102+ }
4103+ }
4104+#endif
4105
4106 if (!inf->fn(addr, ifsr | FSR_LNX_PF, regs))
4107 return;
4108
4109+die:
4110 pr_alert("Unhandled prefetch abort: %s (0x%03x) at 0x%08lx\n",
4111 inf->name, ifsr, addr);
4112
4113diff --git a/arch/arm/mm/fault.h b/arch/arm/mm/fault.h
4114index cf08bdf..772656c 100644
4115--- a/arch/arm/mm/fault.h
4116+++ b/arch/arm/mm/fault.h
4117@@ -3,6 +3,7 @@
4118
4119 /*
4120 * Fault status register encodings. We steal bit 31 for our own purposes.
4121+ * Set when the FSR value is from an instruction fault.
4122 */
4123 #define FSR_LNX_PF (1 << 31)
4124 #define FSR_WRITE (1 << 11)
4125@@ -22,6 +23,17 @@ static inline int fsr_fs(unsigned int fsr)
4126 }
4127 #endif
4128
4129+/* valid for LPAE and !LPAE */
4130+static inline int is_xn_fault(unsigned int fsr)
4131+{
4132+ return ((fsr_fs(fsr) & 0x3c) == 0xc);
4133+}
4134+
4135+static inline int is_domain_fault(unsigned int fsr)
4136+{
4137+ return ((fsr_fs(fsr) & 0xD) == 0x9);
4138+}
4139+
4140 void do_bad_area(unsigned long addr, unsigned int fsr, struct pt_regs *regs);
4141 unsigned long search_exception_table(unsigned long addr);
4142
4143diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
4144index 1609b02..def0785 100644
4145--- a/arch/arm/mm/init.c
4146+++ b/arch/arm/mm/init.c
4147@@ -755,7 +755,46 @@ void free_tcmmem(void)
4148 {
4149 #ifdef CONFIG_HAVE_TCM
4150 extern char __tcm_start, __tcm_end;
4151+#endif
4152
4153+#ifdef CONFIG_PAX_KERNEXEC
4154+ unsigned long addr;
4155+ pgd_t *pgd;
4156+ pud_t *pud;
4157+ pmd_t *pmd;
4158+ int cpu_arch = cpu_architecture();
4159+ unsigned int cr = get_cr();
4160+
4161+ if (cpu_arch >= CPU_ARCH_ARMv6 && (cr & CR_XP)) {
4162+ /* make pages tables, etc before .text NX */
4163+ for (addr = PAGE_OFFSET; addr < (unsigned long)_stext; addr += SECTION_SIZE) {
4164+ pgd = pgd_offset_k(addr);
4165+ pud = pud_offset(pgd, addr);
4166+ pmd = pmd_offset(pud, addr);
4167+ __section_update(pmd, addr, PMD_SECT_XN);
4168+ }
4169+ /* make init NX */
4170+ for (addr = (unsigned long)__init_begin; addr < (unsigned long)_sdata; addr += SECTION_SIZE) {
4171+ pgd = pgd_offset_k(addr);
4172+ pud = pud_offset(pgd, addr);
4173+ pmd = pmd_offset(pud, addr);
4174+ __section_update(pmd, addr, PMD_SECT_XN);
4175+ }
4176+ /* make kernel code/rodata RX */
4177+ for (addr = (unsigned long)_stext; addr < (unsigned long)__init_begin; addr += SECTION_SIZE) {
4178+ pgd = pgd_offset_k(addr);
4179+ pud = pud_offset(pgd, addr);
4180+ pmd = pmd_offset(pud, addr);
4181+#ifdef CONFIG_ARM_LPAE
4182+ __section_update(pmd, addr, PMD_SECT_RDONLY);
4183+#else
4184+ __section_update(pmd, addr, PMD_SECT_APX|PMD_SECT_AP_WRITE);
4185+#endif
4186+ }
4187+ }
4188+#endif
4189+
4190+#ifdef CONFIG_HAVE_TCM
4191 poison_init_mem(&__tcm_start, &__tcm_end - &__tcm_start);
4192 free_reserved_area(&__tcm_start, &__tcm_end, -1, "TCM link");
4193 #endif
4194diff --git a/arch/arm/mm/ioremap.c b/arch/arm/mm/ioremap.c
4195index d1e5ad7..84dcbf2 100644
4196--- a/arch/arm/mm/ioremap.c
4197+++ b/arch/arm/mm/ioremap.c
4198@@ -392,9 +392,9 @@ __arm_ioremap_exec(phys_addr_t phys_addr, size_t size, bool cached)
4199 unsigned int mtype;
4200
4201 if (cached)
4202- mtype = MT_MEMORY_RWX;
4203+ mtype = MT_MEMORY_RX;
4204 else
4205- mtype = MT_MEMORY_RWX_NONCACHED;
4206+ mtype = MT_MEMORY_RX_NONCACHED;
4207
4208 return __arm_ioremap_caller(phys_addr, size, mtype,
4209 __builtin_return_address(0));
4210diff --git a/arch/arm/mm/mmap.c b/arch/arm/mm/mmap.c
4211index 5e85ed3..b10a7ed 100644
4212--- a/arch/arm/mm/mmap.c
4213+++ b/arch/arm/mm/mmap.c
4214@@ -59,6 +59,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
4215 struct vm_area_struct *vma;
4216 int do_align = 0;
4217 int aliasing = cache_is_vipt_aliasing();
4218+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
4219 struct vm_unmapped_area_info info;
4220
4221 /*
4222@@ -81,6 +82,10 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
4223 if (len > TASK_SIZE)
4224 return -ENOMEM;
4225
4226+#ifdef CONFIG_PAX_RANDMMAP
4227+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
4228+#endif
4229+
4230 if (addr) {
4231 if (do_align)
4232 addr = COLOUR_ALIGN(addr, pgoff);
4233@@ -88,8 +93,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
4234 addr = PAGE_ALIGN(addr);
4235
4236 vma = find_vma(mm, addr);
4237- if (TASK_SIZE - len >= addr &&
4238- (!vma || addr + len <= vma->vm_start))
4239+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
4240 return addr;
4241 }
4242
4243@@ -99,6 +103,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
4244 info.high_limit = TASK_SIZE;
4245 info.align_mask = do_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
4246 info.align_offset = pgoff << PAGE_SHIFT;
4247+ info.threadstack_offset = offset;
4248 return vm_unmapped_area(&info);
4249 }
4250
4251@@ -112,6 +117,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4252 unsigned long addr = addr0;
4253 int do_align = 0;
4254 int aliasing = cache_is_vipt_aliasing();
4255+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
4256 struct vm_unmapped_area_info info;
4257
4258 /*
4259@@ -132,6 +138,10 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4260 return addr;
4261 }
4262
4263+#ifdef CONFIG_PAX_RANDMMAP
4264+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
4265+#endif
4266+
4267 /* requesting a specific address */
4268 if (addr) {
4269 if (do_align)
4270@@ -139,8 +149,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4271 else
4272 addr = PAGE_ALIGN(addr);
4273 vma = find_vma(mm, addr);
4274- if (TASK_SIZE - len >= addr &&
4275- (!vma || addr + len <= vma->vm_start))
4276+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
4277 return addr;
4278 }
4279
4280@@ -150,6 +159,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4281 info.high_limit = mm->mmap_base;
4282 info.align_mask = do_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
4283 info.align_offset = pgoff << PAGE_SHIFT;
4284+ info.threadstack_offset = offset;
4285 addr = vm_unmapped_area(&info);
4286
4287 /*
4288@@ -173,6 +183,10 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
4289 {
4290 unsigned long random_factor = 0UL;
4291
4292+#ifdef CONFIG_PAX_RANDMMAP
4293+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
4294+#endif
4295+
4296 /* 8 bits of randomness in 20 address space bits */
4297 if ((current->flags & PF_RANDOMIZE) &&
4298 !(current->personality & ADDR_NO_RANDOMIZE))
4299@@ -180,9 +194,21 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
4300
4301 if (mmap_is_legacy()) {
4302 mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
4303+
4304+#ifdef CONFIG_PAX_RANDMMAP
4305+ if (mm->pax_flags & MF_PAX_RANDMMAP)
4306+ mm->mmap_base += mm->delta_mmap;
4307+#endif
4308+
4309 mm->get_unmapped_area = arch_get_unmapped_area;
4310 } else {
4311 mm->mmap_base = mmap_base(random_factor);
4312+
4313+#ifdef CONFIG_PAX_RANDMMAP
4314+ if (mm->pax_flags & MF_PAX_RANDMMAP)
4315+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
4316+#endif
4317+
4318 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
4319 }
4320 }
4321diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
4322index 4e6ef89..21c27f2 100644
4323--- a/arch/arm/mm/mmu.c
4324+++ b/arch/arm/mm/mmu.c
4325@@ -41,6 +41,22 @@
4326 #include "mm.h"
4327 #include "tcm.h"
4328
4329+#if defined(CONFIG_CPU_USE_DOMAINS) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
4330+void modify_domain(unsigned int dom, unsigned int type)
4331+{
4332+ struct thread_info *thread = current_thread_info();
4333+ unsigned int domain = thread->cpu_domain;
4334+ /*
4335+ * DOMAIN_MANAGER might be defined to some other value,
4336+ * use the arch-defined constant
4337+ */
4338+ domain &= ~domain_val(dom, 3);
4339+ thread->cpu_domain = domain | domain_val(dom, type);
4340+ set_domain(thread->cpu_domain);
4341+}
4342+EXPORT_SYMBOL(modify_domain);
4343+#endif
4344+
4345 /*
4346 * empty_zero_page is a special page that is used for
4347 * zero-initialized data and COW.
4348@@ -242,7 +258,15 @@ __setup("noalign", noalign_setup);
4349 #define PROT_PTE_S2_DEVICE PROT_PTE_DEVICE
4350 #define PROT_SECT_DEVICE PMD_TYPE_SECT|PMD_SECT_AP_WRITE
4351
4352-static struct mem_type mem_types[] = {
4353+#ifdef CONFIG_PAX_KERNEXEC
4354+#define L_PTE_KERNEXEC L_PTE_RDONLY
4355+#define PMD_SECT_KERNEXEC PMD_SECT_RDONLY
4356+#else
4357+#define L_PTE_KERNEXEC L_PTE_DIRTY
4358+#define PMD_SECT_KERNEXEC PMD_SECT_AP_WRITE
4359+#endif
4360+
4361+static struct mem_type mem_types[] __read_only = {
4362 [MT_DEVICE] = { /* Strongly ordered / ARMv6 shared device */
4363 .prot_pte = PROT_PTE_DEVICE | L_PTE_MT_DEV_SHARED |
4364 L_PTE_SHARED,
4365@@ -271,19 +295,19 @@ static struct mem_type mem_types[] = {
4366 .prot_sect = PROT_SECT_DEVICE,
4367 .domain = DOMAIN_IO,
4368 },
4369- [MT_UNCACHED] = {
4370+ [MT_UNCACHED_RW] = {
4371 .prot_pte = PROT_PTE_DEVICE,
4372 .prot_l1 = PMD_TYPE_TABLE,
4373 .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN,
4374 .domain = DOMAIN_IO,
4375 },
4376- [MT_CACHECLEAN] = {
4377- .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN,
4378+ [MT_CACHECLEAN_RO] = {
4379+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN | PMD_SECT_RDONLY,
4380 .domain = DOMAIN_KERNEL,
4381 },
4382 #ifndef CONFIG_ARM_LPAE
4383- [MT_MINICLEAN] = {
4384- .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN | PMD_SECT_MINICACHE,
4385+ [MT_MINICLEAN_RO] = {
4386+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_MINICACHE | PMD_SECT_XN | PMD_SECT_RDONLY,
4387 .domain = DOMAIN_KERNEL,
4388 },
4389 #endif
4390@@ -291,15 +315,15 @@ static struct mem_type mem_types[] = {
4391 .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
4392 L_PTE_RDONLY,
4393 .prot_l1 = PMD_TYPE_TABLE,
4394- .domain = DOMAIN_USER,
4395+ .domain = DOMAIN_VECTORS,
4396 },
4397 [MT_HIGH_VECTORS] = {
4398 .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
4399 L_PTE_USER | L_PTE_RDONLY,
4400 .prot_l1 = PMD_TYPE_TABLE,
4401- .domain = DOMAIN_USER,
4402+ .domain = DOMAIN_VECTORS,
4403 },
4404- [MT_MEMORY_RWX] = {
4405+ [__MT_MEMORY_RWX] = {
4406 .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY,
4407 .prot_l1 = PMD_TYPE_TABLE,
4408 .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
4409@@ -312,17 +336,30 @@ static struct mem_type mem_types[] = {
4410 .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
4411 .domain = DOMAIN_KERNEL,
4412 },
4413- [MT_ROM] = {
4414- .prot_sect = PMD_TYPE_SECT,
4415+ [MT_MEMORY_RX] = {
4416+ .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_KERNEXEC,
4417+ .prot_l1 = PMD_TYPE_TABLE,
4418+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_KERNEXEC,
4419+ .domain = DOMAIN_KERNEL,
4420+ },
4421+ [MT_ROM_RX] = {
4422+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_RDONLY,
4423 .domain = DOMAIN_KERNEL,
4424 },
4425- [MT_MEMORY_RWX_NONCACHED] = {
4426+ [MT_MEMORY_RW_NONCACHED] = {
4427 .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
4428 L_PTE_MT_BUFFERABLE,
4429 .prot_l1 = PMD_TYPE_TABLE,
4430 .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
4431 .domain = DOMAIN_KERNEL,
4432 },
4433+ [MT_MEMORY_RX_NONCACHED] = {
4434+ .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_KERNEXEC |
4435+ L_PTE_MT_BUFFERABLE,
4436+ .prot_l1 = PMD_TYPE_TABLE,
4437+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_KERNEXEC,
4438+ .domain = DOMAIN_KERNEL,
4439+ },
4440 [MT_MEMORY_RW_DTCM] = {
4441 .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
4442 L_PTE_XN,
4443@@ -330,9 +367,10 @@ static struct mem_type mem_types[] = {
4444 .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN,
4445 .domain = DOMAIN_KERNEL,
4446 },
4447- [MT_MEMORY_RWX_ITCM] = {
4448- .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY,
4449+ [MT_MEMORY_RX_ITCM] = {
4450+ .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_KERNEXEC,
4451 .prot_l1 = PMD_TYPE_TABLE,
4452+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_KERNEXEC,
4453 .domain = DOMAIN_KERNEL,
4454 },
4455 [MT_MEMORY_RW_SO] = {
4456@@ -544,9 +582,14 @@ static void __init build_mem_type_table(void)
4457 * Mark cache clean areas and XIP ROM read only
4458 * from SVC mode and no access from userspace.
4459 */
4460- mem_types[MT_ROM].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
4461- mem_types[MT_MINICLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
4462- mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
4463+ mem_types[MT_ROM_RX].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
4464+#ifdef CONFIG_PAX_KERNEXEC
4465+ mem_types[MT_MEMORY_RX].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
4466+ mem_types[MT_MEMORY_RX_NONCACHED].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
4467+ mem_types[MT_MEMORY_RX_ITCM].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
4468+#endif
4469+ mem_types[MT_MINICLEAN_RO].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
4470+ mem_types[MT_CACHECLEAN_RO].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
4471 #endif
4472
4473 /*
4474@@ -563,13 +606,17 @@ static void __init build_mem_type_table(void)
4475 mem_types[MT_DEVICE_WC].prot_pte |= L_PTE_SHARED;
4476 mem_types[MT_DEVICE_CACHED].prot_sect |= PMD_SECT_S;
4477 mem_types[MT_DEVICE_CACHED].prot_pte |= L_PTE_SHARED;
4478- mem_types[MT_MEMORY_RWX].prot_sect |= PMD_SECT_S;
4479- mem_types[MT_MEMORY_RWX].prot_pte |= L_PTE_SHARED;
4480+ mem_types[__MT_MEMORY_RWX].prot_sect |= PMD_SECT_S;
4481+ mem_types[__MT_MEMORY_RWX].prot_pte |= L_PTE_SHARED;
4482 mem_types[MT_MEMORY_RW].prot_sect |= PMD_SECT_S;
4483 mem_types[MT_MEMORY_RW].prot_pte |= L_PTE_SHARED;
4484+ mem_types[MT_MEMORY_RX].prot_sect |= PMD_SECT_S;
4485+ mem_types[MT_MEMORY_RX].prot_pte |= L_PTE_SHARED;
4486 mem_types[MT_MEMORY_DMA_READY].prot_pte |= L_PTE_SHARED;
4487- mem_types[MT_MEMORY_RWX_NONCACHED].prot_sect |= PMD_SECT_S;
4488- mem_types[MT_MEMORY_RWX_NONCACHED].prot_pte |= L_PTE_SHARED;
4489+ mem_types[MT_MEMORY_RW_NONCACHED].prot_sect |= PMD_SECT_S;
4490+ mem_types[MT_MEMORY_RW_NONCACHED].prot_pte |= L_PTE_SHARED;
4491+ mem_types[MT_MEMORY_RX_NONCACHED].prot_sect |= PMD_SECT_S;
4492+ mem_types[MT_MEMORY_RX_NONCACHED].prot_pte |= L_PTE_SHARED;
4493 }
4494 }
4495
4496@@ -580,15 +627,20 @@ static void __init build_mem_type_table(void)
4497 if (cpu_arch >= CPU_ARCH_ARMv6) {
4498 if (cpu_arch >= CPU_ARCH_ARMv7 && (cr & CR_TRE)) {
4499 /* Non-cacheable Normal is XCB = 001 */
4500- mem_types[MT_MEMORY_RWX_NONCACHED].prot_sect |=
4501+ mem_types[MT_MEMORY_RW_NONCACHED].prot_sect |=
4502+ PMD_SECT_BUFFERED;
4503+ mem_types[MT_MEMORY_RX_NONCACHED].prot_sect |=
4504 PMD_SECT_BUFFERED;
4505 } else {
4506 /* For both ARMv6 and non-TEX-remapping ARMv7 */
4507- mem_types[MT_MEMORY_RWX_NONCACHED].prot_sect |=
4508+ mem_types[MT_MEMORY_RW_NONCACHED].prot_sect |=
4509+ PMD_SECT_TEX(1);
4510+ mem_types[MT_MEMORY_RX_NONCACHED].prot_sect |=
4511 PMD_SECT_TEX(1);
4512 }
4513 } else {
4514- mem_types[MT_MEMORY_RWX_NONCACHED].prot_sect |= PMD_SECT_BUFFERABLE;
4515+ mem_types[MT_MEMORY_RW_NONCACHED].prot_sect |= PMD_SECT_BUFFERABLE;
4516+ mem_types[MT_MEMORY_RX_NONCACHED].prot_sect |= PMD_SECT_BUFFERABLE;
4517 }
4518
4519 #ifdef CONFIG_ARM_LPAE
4520@@ -609,6 +661,8 @@ static void __init build_mem_type_table(void)
4521 user_pgprot |= PTE_EXT_PXN;
4522 #endif
4523
4524+ user_pgprot |= __supported_pte_mask;
4525+
4526 for (i = 0; i < 16; i++) {
4527 pteval_t v = pgprot_val(protection_map[i]);
4528 protection_map[i] = __pgprot(v | user_pgprot);
4529@@ -626,21 +680,24 @@ static void __init build_mem_type_table(void)
4530
4531 mem_types[MT_LOW_VECTORS].prot_l1 |= ecc_mask;
4532 mem_types[MT_HIGH_VECTORS].prot_l1 |= ecc_mask;
4533- mem_types[MT_MEMORY_RWX].prot_sect |= ecc_mask | cp->pmd;
4534- mem_types[MT_MEMORY_RWX].prot_pte |= kern_pgprot;
4535+ mem_types[__MT_MEMORY_RWX].prot_sect |= ecc_mask | cp->pmd;
4536+ mem_types[__MT_MEMORY_RWX].prot_pte |= kern_pgprot;
4537 mem_types[MT_MEMORY_RW].prot_sect |= ecc_mask | cp->pmd;
4538 mem_types[MT_MEMORY_RW].prot_pte |= kern_pgprot;
4539+ mem_types[MT_MEMORY_RX].prot_sect |= ecc_mask | cp->pmd;
4540+ mem_types[MT_MEMORY_RX].prot_pte |= kern_pgprot;
4541 mem_types[MT_MEMORY_DMA_READY].prot_pte |= kern_pgprot;
4542- mem_types[MT_MEMORY_RWX_NONCACHED].prot_sect |= ecc_mask;
4543- mem_types[MT_ROM].prot_sect |= cp->pmd;
4544+ mem_types[MT_MEMORY_RW_NONCACHED].prot_sect |= ecc_mask;
4545+ mem_types[MT_MEMORY_RX_NONCACHED].prot_sect |= ecc_mask;
4546+ mem_types[MT_ROM_RX].prot_sect |= cp->pmd;
4547
4548 switch (cp->pmd) {
4549 case PMD_SECT_WT:
4550- mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_WT;
4551+ mem_types[MT_CACHECLEAN_RO].prot_sect |= PMD_SECT_WT;
4552 break;
4553 case PMD_SECT_WB:
4554 case PMD_SECT_WBWA:
4555- mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_WB;
4556+ mem_types[MT_CACHECLEAN_RO].prot_sect |= PMD_SECT_WB;
4557 break;
4558 }
4559 pr_info("Memory policy: %sData cache %s\n",
4560@@ -854,7 +911,7 @@ static void __init create_mapping(struct map_desc *md)
4561 return;
4562 }
4563
4564- if ((md->type == MT_DEVICE || md->type == MT_ROM) &&
4565+ if ((md->type == MT_DEVICE || md->type == MT_ROM_RX) &&
4566 md->virtual >= PAGE_OFFSET &&
4567 (md->virtual < VMALLOC_START || md->virtual >= VMALLOC_END)) {
4568 pr_warn("BUG: mapping for 0x%08llx at 0x%08lx out of vmalloc space\n",
4569@@ -1218,18 +1275,15 @@ void __init arm_mm_memblock_reserve(void)
4570 * called function. This means you can't use any function or debugging
4571 * method which may touch any device, otherwise the kernel _will_ crash.
4572 */
4573+
4574+static char vectors[PAGE_SIZE * 2] __read_only __aligned(PAGE_SIZE);
4575+
4576 static void __init devicemaps_init(const struct machine_desc *mdesc)
4577 {
4578 struct map_desc map;
4579 unsigned long addr;
4580- void *vectors;
4581
4582- /*
4583- * Allocate the vector page early.
4584- */
4585- vectors = early_alloc(PAGE_SIZE * 2);
4586-
4587- early_trap_init(vectors);
4588+ early_trap_init(&vectors);
4589
4590 for (addr = VMALLOC_START; addr; addr += PMD_SIZE)
4591 pmd_clear(pmd_off_k(addr));
4592@@ -1242,7 +1296,7 @@ static void __init devicemaps_init(const struct machine_desc *mdesc)
4593 map.pfn = __phys_to_pfn(CONFIG_XIP_PHYS_ADDR & SECTION_MASK);
4594 map.virtual = MODULES_VADDR;
4595 map.length = ((unsigned long)_etext - map.virtual + ~SECTION_MASK) & SECTION_MASK;
4596- map.type = MT_ROM;
4597+ map.type = MT_ROM_RX;
4598 create_mapping(&map);
4599 #endif
4600
4601@@ -1253,14 +1307,14 @@ static void __init devicemaps_init(const struct machine_desc *mdesc)
4602 map.pfn = __phys_to_pfn(FLUSH_BASE_PHYS);
4603 map.virtual = FLUSH_BASE;
4604 map.length = SZ_1M;
4605- map.type = MT_CACHECLEAN;
4606+ map.type = MT_CACHECLEAN_RO;
4607 create_mapping(&map);
4608 #endif
4609 #ifdef FLUSH_BASE_MINICACHE
4610 map.pfn = __phys_to_pfn(FLUSH_BASE_PHYS + SZ_1M);
4611 map.virtual = FLUSH_BASE_MINICACHE;
4612 map.length = SZ_1M;
4613- map.type = MT_MINICLEAN;
4614+ map.type = MT_MINICLEAN_RO;
4615 create_mapping(&map);
4616 #endif
4617
4618@@ -1269,7 +1323,7 @@ static void __init devicemaps_init(const struct machine_desc *mdesc)
4619 * location (0xffff0000). If we aren't using high-vectors, also
4620 * create a mapping at the low-vectors virtual address.
4621 */
4622- map.pfn = __phys_to_pfn(virt_to_phys(vectors));
4623+ map.pfn = __phys_to_pfn(virt_to_phys(&vectors));
4624 map.virtual = 0xffff0000;
4625 map.length = PAGE_SIZE;
4626 #ifdef CONFIG_KUSER_HELPERS
4627@@ -1329,8 +1383,10 @@ static void __init kmap_init(void)
4628 static void __init map_lowmem(void)
4629 {
4630 struct memblock_region *reg;
4631+#ifndef CONFIG_PAX_KERNEXEC
4632 phys_addr_t kernel_x_start = round_down(__pa(_stext), SECTION_SIZE);
4633 phys_addr_t kernel_x_end = round_up(__pa(__init_end), SECTION_SIZE);
4634+#endif
4635
4636 /* Map all the lowmem memory banks. */
4637 for_each_memblock(memory, reg) {
4638@@ -1343,11 +1399,48 @@ static void __init map_lowmem(void)
4639 if (start >= end)
4640 break;
4641
4642+#ifdef CONFIG_PAX_KERNEXEC
4643+ map.pfn = __phys_to_pfn(start);
4644+ map.virtual = __phys_to_virt(start);
4645+ map.length = end - start;
4646+
4647+ if (map.virtual <= (unsigned long)_stext && ((unsigned long)_end < (map.virtual + map.length))) {
4648+ struct map_desc kernel;
4649+ struct map_desc initmap;
4650+
4651+ /* when freeing initmem we will make this RW */
4652+ initmap.pfn = __phys_to_pfn(__pa(__init_begin));
4653+ initmap.virtual = (unsigned long)__init_begin;
4654+ initmap.length = _sdata - __init_begin;
4655+ initmap.type = __MT_MEMORY_RWX;
4656+ create_mapping(&initmap);
4657+
4658+ /* when freeing initmem we will make this RX */
4659+ kernel.pfn = __phys_to_pfn(__pa(_stext));
4660+ kernel.virtual = (unsigned long)_stext;
4661+ kernel.length = __init_begin - _stext;
4662+ kernel.type = __MT_MEMORY_RWX;
4663+ create_mapping(&kernel);
4664+
4665+ if (map.virtual < (unsigned long)_stext) {
4666+ map.length = (unsigned long)_stext - map.virtual;
4667+ map.type = __MT_MEMORY_RWX;
4668+ create_mapping(&map);
4669+ }
4670+
4671+ map.pfn = __phys_to_pfn(__pa(_sdata));
4672+ map.virtual = (unsigned long)_sdata;
4673+ map.length = end - __pa(_sdata);
4674+ }
4675+
4676+ map.type = MT_MEMORY_RW;
4677+ create_mapping(&map);
4678+#else
4679 if (end < kernel_x_start) {
4680 map.pfn = __phys_to_pfn(start);
4681 map.virtual = __phys_to_virt(start);
4682 map.length = end - start;
4683- map.type = MT_MEMORY_RWX;
4684+ map.type = __MT_MEMORY_RWX;
4685
4686 create_mapping(&map);
4687 } else if (start >= kernel_x_end) {
4688@@ -1371,7 +1464,7 @@ static void __init map_lowmem(void)
4689 map.pfn = __phys_to_pfn(kernel_x_start);
4690 map.virtual = __phys_to_virt(kernel_x_start);
4691 map.length = kernel_x_end - kernel_x_start;
4692- map.type = MT_MEMORY_RWX;
4693+ map.type = __MT_MEMORY_RWX;
4694
4695 create_mapping(&map);
4696
4697@@ -1384,6 +1477,7 @@ static void __init map_lowmem(void)
4698 create_mapping(&map);
4699 }
4700 }
4701+#endif
4702 }
4703 }
4704
4705diff --git a/arch/arm/net/bpf_jit_32.c b/arch/arm/net/bpf_jit_32.c
4706index f412b53..fc89433 100644
4707--- a/arch/arm/net/bpf_jit_32.c
4708+++ b/arch/arm/net/bpf_jit_32.c
4709@@ -20,6 +20,7 @@
4710 #include <asm/cacheflush.h>
4711 #include <asm/hwcap.h>
4712 #include <asm/opcodes.h>
4713+#include <asm/pgtable.h>
4714
4715 #include "bpf_jit_32.h"
4716
4717@@ -71,7 +72,11 @@ struct jit_ctx {
4718 #endif
4719 };
4720
4721+#ifdef CONFIG_GRKERNSEC_BPF_HARDEN
4722+int bpf_jit_enable __read_only;
4723+#else
4724 int bpf_jit_enable __read_mostly;
4725+#endif
4726
4727 static u64 jit_get_skb_b(struct sk_buff *skb, unsigned offset)
4728 {
4729@@ -178,8 +183,10 @@ static void jit_fill_hole(void *area, unsigned int size)
4730 {
4731 u32 *ptr;
4732 /* We are guaranteed to have aligned memory. */
4733+ pax_open_kernel();
4734 for (ptr = area; size >= sizeof(u32); size -= sizeof(u32))
4735 *ptr++ = __opcode_to_mem_arm(ARM_INST_UDF);
4736+ pax_close_kernel();
4737 }
4738
4739 static void build_prologue(struct jit_ctx *ctx)
4740diff --git a/arch/arm/plat-iop/setup.c b/arch/arm/plat-iop/setup.c
4741index 5b217f4..c23f40e 100644
4742--- a/arch/arm/plat-iop/setup.c
4743+++ b/arch/arm/plat-iop/setup.c
4744@@ -24,7 +24,7 @@ static struct map_desc iop3xx_std_desc[] __initdata = {
4745 .virtual = IOP3XX_PERIPHERAL_VIRT_BASE,
4746 .pfn = __phys_to_pfn(IOP3XX_PERIPHERAL_PHYS_BASE),
4747 .length = IOP3XX_PERIPHERAL_SIZE,
4748- .type = MT_UNCACHED,
4749+ .type = MT_UNCACHED_RW,
4750 },
4751 };
4752
4753diff --git a/arch/arm/plat-omap/sram.c b/arch/arm/plat-omap/sram.c
4754index a5bc92d..0bb4730 100644
4755--- a/arch/arm/plat-omap/sram.c
4756+++ b/arch/arm/plat-omap/sram.c
4757@@ -93,6 +93,8 @@ void __init omap_map_sram(unsigned long start, unsigned long size,
4758 * Looks like we need to preserve some bootloader code at the
4759 * beginning of SRAM for jumping to flash for reboot to work...
4760 */
4761+ pax_open_kernel();
4762 memset_io(omap_sram_base + omap_sram_skip, 0,
4763 omap_sram_size - omap_sram_skip);
4764+ pax_close_kernel();
4765 }
4766diff --git a/arch/arm64/include/asm/atomic.h b/arch/arm64/include/asm/atomic.h
4767index 7047051..44e8675 100644
4768--- a/arch/arm64/include/asm/atomic.h
4769+++ b/arch/arm64/include/asm/atomic.h
4770@@ -252,5 +252,15 @@ static inline int atomic64_add_unless(atomic64_t *v, long a, long u)
4771 #define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0)
4772 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1LL, 0LL)
4773
4774+#define atomic64_read_unchecked(v) atomic64_read(v)
4775+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
4776+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
4777+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
4778+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
4779+#define atomic64_inc_unchecked(v) atomic64_inc(v)
4780+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
4781+#define atomic64_dec_unchecked(v) atomic64_dec(v)
4782+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
4783+
4784 #endif
4785 #endif
4786diff --git a/arch/arm64/include/asm/barrier.h b/arch/arm64/include/asm/barrier.h
4787index a5abb00..9cbca9a 100644
4788--- a/arch/arm64/include/asm/barrier.h
4789+++ b/arch/arm64/include/asm/barrier.h
4790@@ -44,7 +44,7 @@
4791 do { \
4792 compiletime_assert_atomic_type(*p); \
4793 barrier(); \
4794- ACCESS_ONCE(*p) = (v); \
4795+ ACCESS_ONCE_RW(*p) = (v); \
4796 } while (0)
4797
4798 #define smp_load_acquire(p) \
4799diff --git a/arch/arm64/include/asm/percpu.h b/arch/arm64/include/asm/percpu.h
4800index 4fde8c1..441f84f 100644
4801--- a/arch/arm64/include/asm/percpu.h
4802+++ b/arch/arm64/include/asm/percpu.h
4803@@ -135,16 +135,16 @@ static inline void __percpu_write(void *ptr, unsigned long val, int size)
4804 {
4805 switch (size) {
4806 case 1:
4807- ACCESS_ONCE(*(u8 *)ptr) = (u8)val;
4808+ ACCESS_ONCE_RW(*(u8 *)ptr) = (u8)val;
4809 break;
4810 case 2:
4811- ACCESS_ONCE(*(u16 *)ptr) = (u16)val;
4812+ ACCESS_ONCE_RW(*(u16 *)ptr) = (u16)val;
4813 break;
4814 case 4:
4815- ACCESS_ONCE(*(u32 *)ptr) = (u32)val;
4816+ ACCESS_ONCE_RW(*(u32 *)ptr) = (u32)val;
4817 break;
4818 case 8:
4819- ACCESS_ONCE(*(u64 *)ptr) = (u64)val;
4820+ ACCESS_ONCE_RW(*(u64 *)ptr) = (u64)val;
4821 break;
4822 default:
4823 BUILD_BUG();
4824diff --git a/arch/arm64/include/asm/pgalloc.h b/arch/arm64/include/asm/pgalloc.h
4825index e20df38..027ede3 100644
4826--- a/arch/arm64/include/asm/pgalloc.h
4827+++ b/arch/arm64/include/asm/pgalloc.h
4828@@ -46,6 +46,11 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
4829 set_pud(pud, __pud(__pa(pmd) | PMD_TYPE_TABLE));
4830 }
4831
4832+static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
4833+{
4834+ pud_populate(mm, pud, pmd);
4835+}
4836+
4837 #endif /* CONFIG_ARM64_PGTABLE_LEVELS > 2 */
4838
4839 #if CONFIG_ARM64_PGTABLE_LEVELS > 3
4840diff --git a/arch/arm64/include/asm/uaccess.h b/arch/arm64/include/asm/uaccess.h
4841index 07e1ba44..ec8cbbb 100644
4842--- a/arch/arm64/include/asm/uaccess.h
4843+++ b/arch/arm64/include/asm/uaccess.h
4844@@ -99,6 +99,7 @@ static inline void set_fs(mm_segment_t fs)
4845 flag; \
4846 })
4847
4848+#define access_ok_noprefault(type, addr, size) access_ok((type), (addr), (size))
4849 #define access_ok(type, addr, size) __range_ok(addr, size)
4850 #define user_addr_max get_fs
4851
4852diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c
4853index b0bd4e5..54e82f6 100644
4854--- a/arch/arm64/mm/dma-mapping.c
4855+++ b/arch/arm64/mm/dma-mapping.c
4856@@ -134,7 +134,7 @@ static void __dma_free_coherent(struct device *dev, size_t size,
4857 phys_to_page(paddr),
4858 size >> PAGE_SHIFT);
4859 if (!freed)
4860- swiotlb_free_coherent(dev, size, vaddr, dma_handle);
4861+ swiotlb_free_coherent(dev, size, vaddr, dma_handle, attrs);
4862 }
4863
4864 static void *__dma_alloc(struct device *dev, size_t size,
4865diff --git a/arch/avr32/include/asm/cache.h b/arch/avr32/include/asm/cache.h
4866index c3a58a1..78fbf54 100644
4867--- a/arch/avr32/include/asm/cache.h
4868+++ b/arch/avr32/include/asm/cache.h
4869@@ -1,8 +1,10 @@
4870 #ifndef __ASM_AVR32_CACHE_H
4871 #define __ASM_AVR32_CACHE_H
4872
4873+#include <linux/const.h>
4874+
4875 #define L1_CACHE_SHIFT 5
4876-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
4877+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
4878
4879 /*
4880 * Memory returned by kmalloc() may be used for DMA, so we must make
4881diff --git a/arch/avr32/include/asm/elf.h b/arch/avr32/include/asm/elf.h
4882index d232888..87c8df1 100644
4883--- a/arch/avr32/include/asm/elf.h
4884+++ b/arch/avr32/include/asm/elf.h
4885@@ -84,8 +84,14 @@ typedef struct user_fpu_struct elf_fpregset_t;
4886 the loader. We need to make sure that it is out of the way of the program
4887 that it will "exec", and that there is sufficient room for the brk. */
4888
4889-#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
4890+#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
4891
4892+#ifdef CONFIG_PAX_ASLR
4893+#define PAX_ELF_ET_DYN_BASE 0x00001000UL
4894+
4895+#define PAX_DELTA_MMAP_LEN 15
4896+#define PAX_DELTA_STACK_LEN 15
4897+#endif
4898
4899 /* This yields a mask that user programs can use to figure out what
4900 instruction set this CPU supports. This could be done in user space,
4901diff --git a/arch/avr32/include/asm/kmap_types.h b/arch/avr32/include/asm/kmap_types.h
4902index 479330b..53717a8 100644
4903--- a/arch/avr32/include/asm/kmap_types.h
4904+++ b/arch/avr32/include/asm/kmap_types.h
4905@@ -2,9 +2,9 @@
4906 #define __ASM_AVR32_KMAP_TYPES_H
4907
4908 #ifdef CONFIG_DEBUG_HIGHMEM
4909-# define KM_TYPE_NR 29
4910+# define KM_TYPE_NR 30
4911 #else
4912-# define KM_TYPE_NR 14
4913+# define KM_TYPE_NR 15
4914 #endif
4915
4916 #endif /* __ASM_AVR32_KMAP_TYPES_H */
4917diff --git a/arch/avr32/mm/fault.c b/arch/avr32/mm/fault.c
4918index d223a8b..69c5210 100644
4919--- a/arch/avr32/mm/fault.c
4920+++ b/arch/avr32/mm/fault.c
4921@@ -41,6 +41,23 @@ static inline int notify_page_fault(struct pt_regs *regs, int trap)
4922
4923 int exception_trace = 1;
4924
4925+#ifdef CONFIG_PAX_PAGEEXEC
4926+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
4927+{
4928+ unsigned long i;
4929+
4930+ printk(KERN_ERR "PAX: bytes at PC: ");
4931+ for (i = 0; i < 20; i++) {
4932+ unsigned char c;
4933+ if (get_user(c, (unsigned char *)pc+i))
4934+ printk(KERN_CONT "???????? ");
4935+ else
4936+ printk(KERN_CONT "%02x ", c);
4937+ }
4938+ printk("\n");
4939+}
4940+#endif
4941+
4942 /*
4943 * This routine handles page faults. It determines the address and the
4944 * problem, and then passes it off to one of the appropriate routines.
4945@@ -178,6 +195,16 @@ bad_area:
4946 up_read(&mm->mmap_sem);
4947
4948 if (user_mode(regs)) {
4949+
4950+#ifdef CONFIG_PAX_PAGEEXEC
4951+ if (mm->pax_flags & MF_PAX_PAGEEXEC) {
4952+ if (ecr == ECR_PROTECTION_X || ecr == ECR_TLB_MISS_X) {
4953+ pax_report_fault(regs, (void *)regs->pc, (void *)regs->sp);
4954+ do_group_exit(SIGKILL);
4955+ }
4956+ }
4957+#endif
4958+
4959 if (exception_trace && printk_ratelimit())
4960 printk("%s%s[%d]: segfault at %08lx pc %08lx "
4961 "sp %08lx ecr %lu\n",
4962diff --git a/arch/blackfin/include/asm/cache.h b/arch/blackfin/include/asm/cache.h
4963index 568885a..f8008df 100644
4964--- a/arch/blackfin/include/asm/cache.h
4965+++ b/arch/blackfin/include/asm/cache.h
4966@@ -7,6 +7,7 @@
4967 #ifndef __ARCH_BLACKFIN_CACHE_H
4968 #define __ARCH_BLACKFIN_CACHE_H
4969
4970+#include <linux/const.h>
4971 #include <linux/linkage.h> /* for asmlinkage */
4972
4973 /*
4974@@ -14,7 +15,7 @@
4975 * Blackfin loads 32 bytes for cache
4976 */
4977 #define L1_CACHE_SHIFT 5
4978-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
4979+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
4980 #define SMP_CACHE_BYTES L1_CACHE_BYTES
4981
4982 #define ARCH_DMA_MINALIGN L1_CACHE_BYTES
4983diff --git a/arch/cris/include/arch-v10/arch/cache.h b/arch/cris/include/arch-v10/arch/cache.h
4984index aea2718..3639a60 100644
4985--- a/arch/cris/include/arch-v10/arch/cache.h
4986+++ b/arch/cris/include/arch-v10/arch/cache.h
4987@@ -1,8 +1,9 @@
4988 #ifndef _ASM_ARCH_CACHE_H
4989 #define _ASM_ARCH_CACHE_H
4990
4991+#include <linux/const.h>
4992 /* Etrax 100LX have 32-byte cache-lines. */
4993-#define L1_CACHE_BYTES 32
4994 #define L1_CACHE_SHIFT 5
4995+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
4996
4997 #endif /* _ASM_ARCH_CACHE_H */
4998diff --git a/arch/cris/include/arch-v32/arch/cache.h b/arch/cris/include/arch-v32/arch/cache.h
4999index 7caf25d..ee65ac5 100644
5000--- a/arch/cris/include/arch-v32/arch/cache.h
5001+++ b/arch/cris/include/arch-v32/arch/cache.h
5002@@ -1,11 +1,12 @@
5003 #ifndef _ASM_CRIS_ARCH_CACHE_H
5004 #define _ASM_CRIS_ARCH_CACHE_H
5005
5006+#include <linux/const.h>
5007 #include <arch/hwregs/dma.h>
5008
5009 /* A cache-line is 32 bytes. */
5010-#define L1_CACHE_BYTES 32
5011 #define L1_CACHE_SHIFT 5
5012+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5013
5014 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
5015
5016diff --git a/arch/frv/include/asm/atomic.h b/arch/frv/include/asm/atomic.h
5017index 102190a..5334cea 100644
5018--- a/arch/frv/include/asm/atomic.h
5019+++ b/arch/frv/include/asm/atomic.h
5020@@ -181,6 +181,16 @@ static inline void atomic64_dec(atomic64_t *v)
5021 #define atomic64_cmpxchg(v, old, new) (__cmpxchg_64(old, new, &(v)->counter))
5022 #define atomic64_xchg(v, new) (__xchg_64(new, &(v)->counter))
5023
5024+#define atomic64_read_unchecked(v) atomic64_read(v)
5025+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
5026+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
5027+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
5028+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
5029+#define atomic64_inc_unchecked(v) atomic64_inc(v)
5030+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
5031+#define atomic64_dec_unchecked(v) atomic64_dec(v)
5032+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
5033+
5034 static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
5035 {
5036 int c, old;
5037diff --git a/arch/frv/include/asm/cache.h b/arch/frv/include/asm/cache.h
5038index 2797163..c2a401df9 100644
5039--- a/arch/frv/include/asm/cache.h
5040+++ b/arch/frv/include/asm/cache.h
5041@@ -12,10 +12,11 @@
5042 #ifndef __ASM_CACHE_H
5043 #define __ASM_CACHE_H
5044
5045+#include <linux/const.h>
5046
5047 /* bytes per L1 cache line */
5048 #define L1_CACHE_SHIFT (CONFIG_FRV_L1_CACHE_SHIFT)
5049-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
5050+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5051
5052 #define __cacheline_aligned __attribute__((aligned(L1_CACHE_BYTES)))
5053 #define ____cacheline_aligned __attribute__((aligned(L1_CACHE_BYTES)))
5054diff --git a/arch/frv/include/asm/kmap_types.h b/arch/frv/include/asm/kmap_types.h
5055index 43901f2..0d8b865 100644
5056--- a/arch/frv/include/asm/kmap_types.h
5057+++ b/arch/frv/include/asm/kmap_types.h
5058@@ -2,6 +2,6 @@
5059 #ifndef _ASM_KMAP_TYPES_H
5060 #define _ASM_KMAP_TYPES_H
5061
5062-#define KM_TYPE_NR 17
5063+#define KM_TYPE_NR 18
5064
5065 #endif
5066diff --git a/arch/frv/mm/elf-fdpic.c b/arch/frv/mm/elf-fdpic.c
5067index 836f147..4cf23f5 100644
5068--- a/arch/frv/mm/elf-fdpic.c
5069+++ b/arch/frv/mm/elf-fdpic.c
5070@@ -61,6 +61,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
5071 {
5072 struct vm_area_struct *vma;
5073 struct vm_unmapped_area_info info;
5074+ unsigned long offset = gr_rand_threadstack_offset(current->mm, filp, flags);
5075
5076 if (len > TASK_SIZE)
5077 return -ENOMEM;
5078@@ -73,8 +74,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
5079 if (addr) {
5080 addr = PAGE_ALIGN(addr);
5081 vma = find_vma(current->mm, addr);
5082- if (TASK_SIZE - len >= addr &&
5083- (!vma || addr + len <= vma->vm_start))
5084+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
5085 goto success;
5086 }
5087
5088@@ -85,6 +85,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
5089 info.high_limit = (current->mm->start_stack - 0x00200000);
5090 info.align_mask = 0;
5091 info.align_offset = 0;
5092+ info.threadstack_offset = offset;
5093 addr = vm_unmapped_area(&info);
5094 if (!(addr & ~PAGE_MASK))
5095 goto success;
5096diff --git a/arch/hexagon/include/asm/cache.h b/arch/hexagon/include/asm/cache.h
5097index 69952c18..4fa2908 100644
5098--- a/arch/hexagon/include/asm/cache.h
5099+++ b/arch/hexagon/include/asm/cache.h
5100@@ -21,9 +21,11 @@
5101 #ifndef __ASM_CACHE_H
5102 #define __ASM_CACHE_H
5103
5104+#include <linux/const.h>
5105+
5106 /* Bytes per L1 cache line */
5107-#define L1_CACHE_SHIFT (5)
5108-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
5109+#define L1_CACHE_SHIFT 5
5110+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5111
5112 #define ARCH_DMA_MINALIGN L1_CACHE_BYTES
5113
5114diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig
5115index 074e52b..76afdac 100644
5116--- a/arch/ia64/Kconfig
5117+++ b/arch/ia64/Kconfig
5118@@ -548,6 +548,7 @@ source "drivers/sn/Kconfig"
5119 config KEXEC
5120 bool "kexec system call"
5121 depends on !IA64_HP_SIM && (!SMP || HOTPLUG_CPU)
5122+ depends on !GRKERNSEC_KMEM
5123 help
5124 kexec is a system call that implements the ability to shutdown your
5125 current kernel, and to start another kernel. It is like a reboot
5126diff --git a/arch/ia64/Makefile b/arch/ia64/Makefile
5127index 970d0bd..e750b9b 100644
5128--- a/arch/ia64/Makefile
5129+++ b/arch/ia64/Makefile
5130@@ -98,5 +98,6 @@ endef
5131 archprepare: make_nr_irqs_h FORCE
5132 PHONY += make_nr_irqs_h FORCE
5133
5134+make_nr_irqs_h: KBUILD_CFLAGS := $(filter-out $(GCC_PLUGINS_CFLAGS),$(KBUILD_CFLAGS))
5135 make_nr_irqs_h: FORCE
5136 $(Q)$(MAKE) $(build)=arch/ia64/kernel include/generated/nr-irqs.h
5137diff --git a/arch/ia64/include/asm/atomic.h b/arch/ia64/include/asm/atomic.h
5138index 0bf0350..2ad1957 100644
5139--- a/arch/ia64/include/asm/atomic.h
5140+++ b/arch/ia64/include/asm/atomic.h
5141@@ -193,4 +193,14 @@ atomic64_add_negative (__s64 i, atomic64_t *v)
5142 #define atomic64_inc(v) atomic64_add(1, (v))
5143 #define atomic64_dec(v) atomic64_sub(1, (v))
5144
5145+#define atomic64_read_unchecked(v) atomic64_read(v)
5146+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
5147+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
5148+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
5149+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
5150+#define atomic64_inc_unchecked(v) atomic64_inc(v)
5151+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
5152+#define atomic64_dec_unchecked(v) atomic64_dec(v)
5153+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
5154+
5155 #endif /* _ASM_IA64_ATOMIC_H */
5156diff --git a/arch/ia64/include/asm/barrier.h b/arch/ia64/include/asm/barrier.h
5157index f6769eb..1cdb590 100644
5158--- a/arch/ia64/include/asm/barrier.h
5159+++ b/arch/ia64/include/asm/barrier.h
5160@@ -66,7 +66,7 @@
5161 do { \
5162 compiletime_assert_atomic_type(*p); \
5163 barrier(); \
5164- ACCESS_ONCE(*p) = (v); \
5165+ ACCESS_ONCE_RW(*p) = (v); \
5166 } while (0)
5167
5168 #define smp_load_acquire(p) \
5169diff --git a/arch/ia64/include/asm/cache.h b/arch/ia64/include/asm/cache.h
5170index 988254a..e1ee885 100644
5171--- a/arch/ia64/include/asm/cache.h
5172+++ b/arch/ia64/include/asm/cache.h
5173@@ -1,6 +1,7 @@
5174 #ifndef _ASM_IA64_CACHE_H
5175 #define _ASM_IA64_CACHE_H
5176
5177+#include <linux/const.h>
5178
5179 /*
5180 * Copyright (C) 1998-2000 Hewlett-Packard Co
5181@@ -9,7 +10,7 @@
5182
5183 /* Bytes per L1 (data) cache line. */
5184 #define L1_CACHE_SHIFT CONFIG_IA64_L1_CACHE_SHIFT
5185-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
5186+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5187
5188 #ifdef CONFIG_SMP
5189 # define SMP_CACHE_SHIFT L1_CACHE_SHIFT
5190diff --git a/arch/ia64/include/asm/elf.h b/arch/ia64/include/asm/elf.h
5191index 5a83c5c..4d7f553 100644
5192--- a/arch/ia64/include/asm/elf.h
5193+++ b/arch/ia64/include/asm/elf.h
5194@@ -42,6 +42,13 @@
5195 */
5196 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x800000000UL)
5197
5198+#ifdef CONFIG_PAX_ASLR
5199+#define PAX_ELF_ET_DYN_BASE (current->personality == PER_LINUX32 ? 0x08048000UL : 0x4000000000000000UL)
5200+
5201+#define PAX_DELTA_MMAP_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
5202+#define PAX_DELTA_STACK_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
5203+#endif
5204+
5205 #define PT_IA_64_UNWIND 0x70000001
5206
5207 /* IA-64 relocations: */
5208diff --git a/arch/ia64/include/asm/pgalloc.h b/arch/ia64/include/asm/pgalloc.h
5209index 5767cdf..7462574 100644
5210--- a/arch/ia64/include/asm/pgalloc.h
5211+++ b/arch/ia64/include/asm/pgalloc.h
5212@@ -39,6 +39,12 @@ pgd_populate(struct mm_struct *mm, pgd_t * pgd_entry, pud_t * pud)
5213 pgd_val(*pgd_entry) = __pa(pud);
5214 }
5215
5216+static inline void
5217+pgd_populate_kernel(struct mm_struct *mm, pgd_t * pgd_entry, pud_t * pud)
5218+{
5219+ pgd_populate(mm, pgd_entry, pud);
5220+}
5221+
5222 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
5223 {
5224 return quicklist_alloc(0, GFP_KERNEL, NULL);
5225@@ -57,6 +63,12 @@ pud_populate(struct mm_struct *mm, pud_t * pud_entry, pmd_t * pmd)
5226 pud_val(*pud_entry) = __pa(pmd);
5227 }
5228
5229+static inline void
5230+pud_populate_kernel(struct mm_struct *mm, pud_t * pud_entry, pmd_t * pmd)
5231+{
5232+ pud_populate(mm, pud_entry, pmd);
5233+}
5234+
5235 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
5236 {
5237 return quicklist_alloc(0, GFP_KERNEL, NULL);
5238diff --git a/arch/ia64/include/asm/pgtable.h b/arch/ia64/include/asm/pgtable.h
5239index 7b6f880..ac8e008 100644
5240--- a/arch/ia64/include/asm/pgtable.h
5241+++ b/arch/ia64/include/asm/pgtable.h
5242@@ -12,7 +12,7 @@
5243 * David Mosberger-Tang <davidm@hpl.hp.com>
5244 */
5245
5246-
5247+#include <linux/const.h>
5248 #include <asm/mman.h>
5249 #include <asm/page.h>
5250 #include <asm/processor.h>
5251@@ -139,6 +139,17 @@
5252 #define PAGE_READONLY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
5253 #define PAGE_COPY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
5254 #define PAGE_COPY_EXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RX)
5255+
5256+#ifdef CONFIG_PAX_PAGEEXEC
5257+# define PAGE_SHARED_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RW)
5258+# define PAGE_READONLY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
5259+# define PAGE_COPY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
5260+#else
5261+# define PAGE_SHARED_NOEXEC PAGE_SHARED
5262+# define PAGE_READONLY_NOEXEC PAGE_READONLY
5263+# define PAGE_COPY_NOEXEC PAGE_COPY
5264+#endif
5265+
5266 #define PAGE_GATE __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_X_RX)
5267 #define PAGE_KERNEL __pgprot(__DIRTY_BITS | _PAGE_PL_0 | _PAGE_AR_RWX)
5268 #define PAGE_KERNELRX __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_RX)
5269diff --git a/arch/ia64/include/asm/spinlock.h b/arch/ia64/include/asm/spinlock.h
5270index 45698cd..e8e2dbc 100644
5271--- a/arch/ia64/include/asm/spinlock.h
5272+++ b/arch/ia64/include/asm/spinlock.h
5273@@ -71,7 +71,7 @@ static __always_inline void __ticket_spin_unlock(arch_spinlock_t *lock)
5274 unsigned short *p = (unsigned short *)&lock->lock + 1, tmp;
5275
5276 asm volatile ("ld2.bias %0=[%1]" : "=r"(tmp) : "r"(p));
5277- ACCESS_ONCE(*p) = (tmp + 2) & ~1;
5278+ ACCESS_ONCE_RW(*p) = (tmp + 2) & ~1;
5279 }
5280
5281 static __always_inline void __ticket_spin_unlock_wait(arch_spinlock_t *lock)
5282diff --git a/arch/ia64/include/asm/uaccess.h b/arch/ia64/include/asm/uaccess.h
5283index 4f3fb6cc..254055e 100644
5284--- a/arch/ia64/include/asm/uaccess.h
5285+++ b/arch/ia64/include/asm/uaccess.h
5286@@ -70,6 +70,7 @@
5287 && ((segment).seg == KERNEL_DS.seg \
5288 || likely(REGION_OFFSET((unsigned long) (addr)) < RGN_MAP_LIMIT))); \
5289 })
5290+#define access_ok_noprefault(type, addr, size) access_ok((type), (addr), (size))
5291 #define access_ok(type, addr, size) __access_ok((addr), (size), get_fs())
5292
5293 /*
5294@@ -241,12 +242,24 @@ extern unsigned long __must_check __copy_user (void __user *to, const void __use
5295 static inline unsigned long
5296 __copy_to_user (void __user *to, const void *from, unsigned long count)
5297 {
5298+ if (count > INT_MAX)
5299+ return count;
5300+
5301+ if (!__builtin_constant_p(count))
5302+ check_object_size(from, count, true);
5303+
5304 return __copy_user(to, (__force void __user *) from, count);
5305 }
5306
5307 static inline unsigned long
5308 __copy_from_user (void *to, const void __user *from, unsigned long count)
5309 {
5310+ if (count > INT_MAX)
5311+ return count;
5312+
5313+ if (!__builtin_constant_p(count))
5314+ check_object_size(to, count, false);
5315+
5316 return __copy_user((__force void __user *) to, from, count);
5317 }
5318
5319@@ -256,10 +269,13 @@ __copy_from_user (void *to, const void __user *from, unsigned long count)
5320 ({ \
5321 void __user *__cu_to = (to); \
5322 const void *__cu_from = (from); \
5323- long __cu_len = (n); \
5324+ unsigned long __cu_len = (n); \
5325 \
5326- if (__access_ok(__cu_to, __cu_len, get_fs())) \
5327+ if (__cu_len <= INT_MAX && __access_ok(__cu_to, __cu_len, get_fs())) { \
5328+ if (!__builtin_constant_p(n)) \
5329+ check_object_size(__cu_from, __cu_len, true); \
5330 __cu_len = __copy_user(__cu_to, (__force void __user *) __cu_from, __cu_len); \
5331+ } \
5332 __cu_len; \
5333 })
5334
5335@@ -267,11 +283,14 @@ __copy_from_user (void *to, const void __user *from, unsigned long count)
5336 ({ \
5337 void *__cu_to = (to); \
5338 const void __user *__cu_from = (from); \
5339- long __cu_len = (n); \
5340+ unsigned long __cu_len = (n); \
5341 \
5342 __chk_user_ptr(__cu_from); \
5343- if (__access_ok(__cu_from, __cu_len, get_fs())) \
5344+ if (__cu_len <= INT_MAX && __access_ok(__cu_from, __cu_len, get_fs())) { \
5345+ if (!__builtin_constant_p(n)) \
5346+ check_object_size(__cu_to, __cu_len, false); \
5347 __cu_len = __copy_user((__force void __user *) __cu_to, __cu_from, __cu_len); \
5348+ } \
5349 __cu_len; \
5350 })
5351
5352diff --git a/arch/ia64/kernel/module.c b/arch/ia64/kernel/module.c
5353index 29754aa..06d2838 100644
5354--- a/arch/ia64/kernel/module.c
5355+++ b/arch/ia64/kernel/module.c
5356@@ -492,15 +492,39 @@ module_frob_arch_sections (Elf_Ehdr *ehdr, Elf_Shdr *sechdrs, char *secstrings,
5357 }
5358
5359 static inline int
5360+in_init_rx (const struct module *mod, uint64_t addr)
5361+{
5362+ return addr - (uint64_t) mod->module_init_rx < mod->init_size_rx;
5363+}
5364+
5365+static inline int
5366+in_init_rw (const struct module *mod, uint64_t addr)
5367+{
5368+ return addr - (uint64_t) mod->module_init_rw < mod->init_size_rw;
5369+}
5370+
5371+static inline int
5372 in_init (const struct module *mod, uint64_t addr)
5373 {
5374- return addr - (uint64_t) mod->module_init < mod->init_size;
5375+ return in_init_rx(mod, addr) || in_init_rw(mod, addr);
5376+}
5377+
5378+static inline int
5379+in_core_rx (const struct module *mod, uint64_t addr)
5380+{
5381+ return addr - (uint64_t) mod->module_core_rx < mod->core_size_rx;
5382+}
5383+
5384+static inline int
5385+in_core_rw (const struct module *mod, uint64_t addr)
5386+{
5387+ return addr - (uint64_t) mod->module_core_rw < mod->core_size_rw;
5388 }
5389
5390 static inline int
5391 in_core (const struct module *mod, uint64_t addr)
5392 {
5393- return addr - (uint64_t) mod->module_core < mod->core_size;
5394+ return in_core_rx(mod, addr) || in_core_rw(mod, addr);
5395 }
5396
5397 static inline int
5398@@ -683,7 +707,14 @@ do_reloc (struct module *mod, uint8_t r_type, Elf64_Sym *sym, uint64_t addend,
5399 break;
5400
5401 case RV_BDREL:
5402- val -= (uint64_t) (in_init(mod, val) ? mod->module_init : mod->module_core);
5403+ if (in_init_rx(mod, val))
5404+ val -= (uint64_t) mod->module_init_rx;
5405+ else if (in_init_rw(mod, val))
5406+ val -= (uint64_t) mod->module_init_rw;
5407+ else if (in_core_rx(mod, val))
5408+ val -= (uint64_t) mod->module_core_rx;
5409+ else if (in_core_rw(mod, val))
5410+ val -= (uint64_t) mod->module_core_rw;
5411 break;
5412
5413 case RV_LTV:
5414@@ -818,15 +849,15 @@ apply_relocate_add (Elf64_Shdr *sechdrs, const char *strtab, unsigned int symind
5415 * addresses have been selected...
5416 */
5417 uint64_t gp;
5418- if (mod->core_size > MAX_LTOFF)
5419+ if (mod->core_size_rx + mod->core_size_rw > MAX_LTOFF)
5420 /*
5421 * This takes advantage of fact that SHF_ARCH_SMALL gets allocated
5422 * at the end of the module.
5423 */
5424- gp = mod->core_size - MAX_LTOFF / 2;
5425+ gp = mod->core_size_rx + mod->core_size_rw - MAX_LTOFF / 2;
5426 else
5427- gp = mod->core_size / 2;
5428- gp = (uint64_t) mod->module_core + ((gp + 7) & -8);
5429+ gp = (mod->core_size_rx + mod->core_size_rw) / 2;
5430+ gp = (uint64_t) mod->module_core_rx + ((gp + 7) & -8);
5431 mod->arch.gp = gp;
5432 DEBUGP("%s: placing gp at 0x%lx\n", __func__, gp);
5433 }
5434diff --git a/arch/ia64/kernel/palinfo.c b/arch/ia64/kernel/palinfo.c
5435index c39c3cd..3c77738 100644
5436--- a/arch/ia64/kernel/palinfo.c
5437+++ b/arch/ia64/kernel/palinfo.c
5438@@ -980,7 +980,7 @@ static int palinfo_cpu_callback(struct notifier_block *nfb,
5439 return NOTIFY_OK;
5440 }
5441
5442-static struct notifier_block __refdata palinfo_cpu_notifier =
5443+static struct notifier_block palinfo_cpu_notifier =
5444 {
5445 .notifier_call = palinfo_cpu_callback,
5446 .priority = 0,
5447diff --git a/arch/ia64/kernel/sys_ia64.c b/arch/ia64/kernel/sys_ia64.c
5448index 41e33f8..65180b2a 100644
5449--- a/arch/ia64/kernel/sys_ia64.c
5450+++ b/arch/ia64/kernel/sys_ia64.c
5451@@ -28,6 +28,7 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
5452 unsigned long align_mask = 0;
5453 struct mm_struct *mm = current->mm;
5454 struct vm_unmapped_area_info info;
5455+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
5456
5457 if (len > RGN_MAP_LIMIT)
5458 return -ENOMEM;
5459@@ -43,6 +44,13 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
5460 if (REGION_NUMBER(addr) == RGN_HPAGE)
5461 addr = 0;
5462 #endif
5463+
5464+#ifdef CONFIG_PAX_RANDMMAP
5465+ if (mm->pax_flags & MF_PAX_RANDMMAP)
5466+ addr = mm->free_area_cache;
5467+ else
5468+#endif
5469+
5470 if (!addr)
5471 addr = TASK_UNMAPPED_BASE;
5472
5473@@ -61,6 +69,7 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
5474 info.high_limit = TASK_SIZE;
5475 info.align_mask = align_mask;
5476 info.align_offset = 0;
5477+ info.threadstack_offset = offset;
5478 return vm_unmapped_area(&info);
5479 }
5480
5481diff --git a/arch/ia64/kernel/vmlinux.lds.S b/arch/ia64/kernel/vmlinux.lds.S
5482index 84f8a52..7c76178 100644
5483--- a/arch/ia64/kernel/vmlinux.lds.S
5484+++ b/arch/ia64/kernel/vmlinux.lds.S
5485@@ -192,7 +192,7 @@ SECTIONS {
5486 /* Per-cpu data: */
5487 . = ALIGN(PERCPU_PAGE_SIZE);
5488 PERCPU_VADDR(SMP_CACHE_BYTES, PERCPU_ADDR, :percpu)
5489- __phys_per_cpu_start = __per_cpu_load;
5490+ __phys_per_cpu_start = per_cpu_load;
5491 /*
5492 * ensure percpu data fits
5493 * into percpu page size
5494diff --git a/arch/ia64/mm/fault.c b/arch/ia64/mm/fault.c
5495index ba5ba7a..36e9d3a 100644
5496--- a/arch/ia64/mm/fault.c
5497+++ b/arch/ia64/mm/fault.c
5498@@ -72,6 +72,23 @@ mapped_kernel_page_is_present (unsigned long address)
5499 return pte_present(pte);
5500 }
5501
5502+#ifdef CONFIG_PAX_PAGEEXEC
5503+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
5504+{
5505+ unsigned long i;
5506+
5507+ printk(KERN_ERR "PAX: bytes at PC: ");
5508+ for (i = 0; i < 8; i++) {
5509+ unsigned int c;
5510+ if (get_user(c, (unsigned int *)pc+i))
5511+ printk(KERN_CONT "???????? ");
5512+ else
5513+ printk(KERN_CONT "%08x ", c);
5514+ }
5515+ printk("\n");
5516+}
5517+#endif
5518+
5519 # define VM_READ_BIT 0
5520 # define VM_WRITE_BIT 1
5521 # define VM_EXEC_BIT 2
5522@@ -151,8 +168,21 @@ retry:
5523 if (((isr >> IA64_ISR_R_BIT) & 1UL) && (!(vma->vm_flags & (VM_READ | VM_WRITE))))
5524 goto bad_area;
5525
5526- if ((vma->vm_flags & mask) != mask)
5527+ if ((vma->vm_flags & mask) != mask) {
5528+
5529+#ifdef CONFIG_PAX_PAGEEXEC
5530+ if (!(vma->vm_flags & VM_EXEC) && (mask & VM_EXEC)) {
5531+ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->cr_iip)
5532+ goto bad_area;
5533+
5534+ up_read(&mm->mmap_sem);
5535+ pax_report_fault(regs, (void *)regs->cr_iip, (void *)regs->r12);
5536+ do_group_exit(SIGKILL);
5537+ }
5538+#endif
5539+
5540 goto bad_area;
5541+ }
5542
5543 /*
5544 * If for any reason at all we couldn't handle the fault, make
5545diff --git a/arch/ia64/mm/hugetlbpage.c b/arch/ia64/mm/hugetlbpage.c
5546index 52b7604b..455cb85 100644
5547--- a/arch/ia64/mm/hugetlbpage.c
5548+++ b/arch/ia64/mm/hugetlbpage.c
5549@@ -143,6 +143,7 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, u
5550 unsigned long pgoff, unsigned long flags)
5551 {
5552 struct vm_unmapped_area_info info;
5553+ unsigned long offset = gr_rand_threadstack_offset(current->mm, file, flags);
5554
5555 if (len > RGN_MAP_LIMIT)
5556 return -ENOMEM;
5557@@ -166,6 +167,7 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, u
5558 info.high_limit = HPAGE_REGION_BASE + RGN_MAP_LIMIT;
5559 info.align_mask = PAGE_MASK & (HPAGE_SIZE - 1);
5560 info.align_offset = 0;
5561+ info.threadstack_offset = offset;
5562 return vm_unmapped_area(&info);
5563 }
5564
5565diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
5566index 6b33457..88b5124 100644
5567--- a/arch/ia64/mm/init.c
5568+++ b/arch/ia64/mm/init.c
5569@@ -120,6 +120,19 @@ ia64_init_addr_space (void)
5570 vma->vm_start = current->thread.rbs_bot & PAGE_MASK;
5571 vma->vm_end = vma->vm_start + PAGE_SIZE;
5572 vma->vm_flags = VM_DATA_DEFAULT_FLAGS|VM_GROWSUP|VM_ACCOUNT;
5573+
5574+#ifdef CONFIG_PAX_PAGEEXEC
5575+ if (current->mm->pax_flags & MF_PAX_PAGEEXEC) {
5576+ vma->vm_flags &= ~VM_EXEC;
5577+
5578+#ifdef CONFIG_PAX_MPROTECT
5579+ if (current->mm->pax_flags & MF_PAX_MPROTECT)
5580+ vma->vm_flags &= ~VM_MAYEXEC;
5581+#endif
5582+
5583+ }
5584+#endif
5585+
5586 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
5587 down_write(&current->mm->mmap_sem);
5588 if (insert_vm_struct(current->mm, vma)) {
5589@@ -286,7 +299,7 @@ static int __init gate_vma_init(void)
5590 gate_vma.vm_start = FIXADDR_USER_START;
5591 gate_vma.vm_end = FIXADDR_USER_END;
5592 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
5593- gate_vma.vm_page_prot = __P101;
5594+ gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
5595
5596 return 0;
5597 }
5598diff --git a/arch/m32r/include/asm/cache.h b/arch/m32r/include/asm/cache.h
5599index 40b3ee98..8c2c112 100644
5600--- a/arch/m32r/include/asm/cache.h
5601+++ b/arch/m32r/include/asm/cache.h
5602@@ -1,8 +1,10 @@
5603 #ifndef _ASM_M32R_CACHE_H
5604 #define _ASM_M32R_CACHE_H
5605
5606+#include <linux/const.h>
5607+
5608 /* L1 cache line size */
5609 #define L1_CACHE_SHIFT 4
5610-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
5611+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5612
5613 #endif /* _ASM_M32R_CACHE_H */
5614diff --git a/arch/m32r/lib/usercopy.c b/arch/m32r/lib/usercopy.c
5615index 82abd15..d95ae5d 100644
5616--- a/arch/m32r/lib/usercopy.c
5617+++ b/arch/m32r/lib/usercopy.c
5618@@ -14,6 +14,9 @@
5619 unsigned long
5620 __generic_copy_to_user(void __user *to, const void *from, unsigned long n)
5621 {
5622+ if ((long)n < 0)
5623+ return n;
5624+
5625 prefetch(from);
5626 if (access_ok(VERIFY_WRITE, to, n))
5627 __copy_user(to,from,n);
5628@@ -23,6 +26,9 @@ __generic_copy_to_user(void __user *to, const void *from, unsigned long n)
5629 unsigned long
5630 __generic_copy_from_user(void *to, const void __user *from, unsigned long n)
5631 {
5632+ if ((long)n < 0)
5633+ return n;
5634+
5635 prefetchw(to);
5636 if (access_ok(VERIFY_READ, from, n))
5637 __copy_user_zeroing(to,from,n);
5638diff --git a/arch/m68k/include/asm/cache.h b/arch/m68k/include/asm/cache.h
5639index 0395c51..5f26031 100644
5640--- a/arch/m68k/include/asm/cache.h
5641+++ b/arch/m68k/include/asm/cache.h
5642@@ -4,9 +4,11 @@
5643 #ifndef __ARCH_M68K_CACHE_H
5644 #define __ARCH_M68K_CACHE_H
5645
5646+#include <linux/const.h>
5647+
5648 /* bytes per L1 cache line */
5649 #define L1_CACHE_SHIFT 4
5650-#define L1_CACHE_BYTES (1<< L1_CACHE_SHIFT)
5651+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5652
5653 #define ARCH_DMA_MINALIGN L1_CACHE_BYTES
5654
5655diff --git a/arch/metag/include/asm/barrier.h b/arch/metag/include/asm/barrier.h
5656index d703d8e..a8e2d70 100644
5657--- a/arch/metag/include/asm/barrier.h
5658+++ b/arch/metag/include/asm/barrier.h
5659@@ -90,7 +90,7 @@ static inline void fence(void)
5660 do { \
5661 compiletime_assert_atomic_type(*p); \
5662 smp_mb(); \
5663- ACCESS_ONCE(*p) = (v); \
5664+ ACCESS_ONCE_RW(*p) = (v); \
5665 } while (0)
5666
5667 #define smp_load_acquire(p) \
5668diff --git a/arch/metag/mm/hugetlbpage.c b/arch/metag/mm/hugetlbpage.c
5669index 7ca80ac..794ba72 100644
5670--- a/arch/metag/mm/hugetlbpage.c
5671+++ b/arch/metag/mm/hugetlbpage.c
5672@@ -194,6 +194,7 @@ hugetlb_get_unmapped_area_new_pmd(unsigned long len)
5673 info.high_limit = TASK_SIZE;
5674 info.align_mask = PAGE_MASK & HUGEPT_MASK;
5675 info.align_offset = 0;
5676+ info.threadstack_offset = 0;
5677 return vm_unmapped_area(&info);
5678 }
5679
5680diff --git a/arch/microblaze/include/asm/cache.h b/arch/microblaze/include/asm/cache.h
5681index 4efe96a..60e8699 100644
5682--- a/arch/microblaze/include/asm/cache.h
5683+++ b/arch/microblaze/include/asm/cache.h
5684@@ -13,11 +13,12 @@
5685 #ifndef _ASM_MICROBLAZE_CACHE_H
5686 #define _ASM_MICROBLAZE_CACHE_H
5687
5688+#include <linux/const.h>
5689 #include <asm/registers.h>
5690
5691 #define L1_CACHE_SHIFT 5
5692 /* word-granular cache in microblaze */
5693-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
5694+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5695
5696 #define SMP_CACHE_BYTES L1_CACHE_BYTES
5697
5698diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
5699index 1a313c4..f27b613 100644
5700--- a/arch/mips/Kconfig
5701+++ b/arch/mips/Kconfig
5702@@ -2504,6 +2504,7 @@ source "kernel/Kconfig.preempt"
5703
5704 config KEXEC
5705 bool "Kexec system call"
5706+ depends on !GRKERNSEC_KMEM
5707 help
5708 kexec is a system call that implements the ability to shutdown your
5709 current kernel, and to start another kernel. It is like a reboot
5710diff --git a/arch/mips/cavium-octeon/dma-octeon.c b/arch/mips/cavium-octeon/dma-octeon.c
5711index d8960d4..77dbd31 100644
5712--- a/arch/mips/cavium-octeon/dma-octeon.c
5713+++ b/arch/mips/cavium-octeon/dma-octeon.c
5714@@ -199,7 +199,7 @@ static void octeon_dma_free_coherent(struct device *dev, size_t size,
5715 if (dma_release_from_coherent(dev, order, vaddr))
5716 return;
5717
5718- swiotlb_free_coherent(dev, size, vaddr, dma_handle);
5719+ swiotlb_free_coherent(dev, size, vaddr, dma_handle, attrs);
5720 }
5721
5722 static dma_addr_t octeon_unity_phys_to_dma(struct device *dev, phys_addr_t paddr)
5723diff --git a/arch/mips/include/asm/atomic.h b/arch/mips/include/asm/atomic.h
5724index 26d4363..3c9a82e 100644
5725--- a/arch/mips/include/asm/atomic.h
5726+++ b/arch/mips/include/asm/atomic.h
5727@@ -22,15 +22,39 @@
5728 #include <asm/cmpxchg.h>
5729 #include <asm/war.h>
5730
5731+#ifdef CONFIG_GENERIC_ATOMIC64
5732+#include <asm-generic/atomic64.h>
5733+#endif
5734+
5735 #define ATOMIC_INIT(i) { (i) }
5736
5737+#ifdef CONFIG_64BIT
5738+#define _ASM_EXTABLE(from, to) \
5739+" .section __ex_table,\"a\"\n" \
5740+" .dword " #from ", " #to"\n" \
5741+" .previous\n"
5742+#else
5743+#define _ASM_EXTABLE(from, to) \
5744+" .section __ex_table,\"a\"\n" \
5745+" .word " #from ", " #to"\n" \
5746+" .previous\n"
5747+#endif
5748+
5749 /*
5750 * atomic_read - read atomic variable
5751 * @v: pointer of type atomic_t
5752 *
5753 * Atomically reads the value of @v.
5754 */
5755-#define atomic_read(v) ACCESS_ONCE((v)->counter)
5756+static inline int atomic_read(const atomic_t *v)
5757+{
5758+ return ACCESS_ONCE(v->counter);
5759+}
5760+
5761+static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
5762+{
5763+ return ACCESS_ONCE(v->counter);
5764+}
5765
5766 /*
5767 * atomic_set - set atomic variable
5768@@ -39,47 +63,77 @@
5769 *
5770 * Atomically sets the value of @v to @i.
5771 */
5772-#define atomic_set(v, i) ((v)->counter = (i))
5773+static inline void atomic_set(atomic_t *v, int i)
5774+{
5775+ v->counter = i;
5776+}
5777
5778-#define ATOMIC_OP(op, c_op, asm_op) \
5779-static __inline__ void atomic_##op(int i, atomic_t * v) \
5780+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
5781+{
5782+ v->counter = i;
5783+}
5784+
5785+#ifdef CONFIG_PAX_REFCOUNT
5786+#define __OVERFLOW_POST \
5787+ " b 4f \n" \
5788+ " .set noreorder \n" \
5789+ "3: b 5f \n" \
5790+ " move %0, %1 \n" \
5791+ " .set reorder \n"
5792+#define __OVERFLOW_EXTABLE \
5793+ "3:\n" \
5794+ _ASM_EXTABLE(2b, 3b)
5795+#else
5796+#define __OVERFLOW_POST
5797+#define __OVERFLOW_EXTABLE
5798+#endif
5799+
5800+#define __ATOMIC_OP(op, suffix, asm_op, extable) \
5801+static inline void atomic_##op##suffix(int i, atomic##suffix##_t * v) \
5802 { \
5803 if (kernel_uses_llsc && R10000_LLSC_WAR) { \
5804 int temp; \
5805 \
5806 __asm__ __volatile__( \
5807- " .set arch=r4000 \n" \
5808- "1: ll %0, %1 # atomic_" #op " \n" \
5809- " " #asm_op " %0, %2 \n" \
5810+ " .set mips3 \n" \
5811+ "1: ll %0, %1 # atomic_" #op #suffix "\n" \
5812+ "2: " #asm_op " %0, %2 \n" \
5813 " sc %0, %1 \n" \
5814 " beqzl %0, 1b \n" \
5815+ extable \
5816 " .set mips0 \n" \
5817 : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (v->counter) \
5818 : "Ir" (i)); \
5819 } else if (kernel_uses_llsc) { \
5820 int temp; \
5821 \
5822- do { \
5823- __asm__ __volatile__( \
5824- " .set "MIPS_ISA_LEVEL" \n" \
5825- " ll %0, %1 # atomic_" #op "\n" \
5826- " " #asm_op " %0, %2 \n" \
5827- " sc %0, %1 \n" \
5828- " .set mips0 \n" \
5829- : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (v->counter) \
5830- : "Ir" (i)); \
5831- } while (unlikely(!temp)); \
5832+ __asm__ __volatile__( \
5833+ " .set "MIPS_ISA_LEVEL" \n" \
5834+ "1: ll %0, %1 # atomic_" #op #suffix "\n" \
5835+ "2: " #asm_op " %0, %2 \n" \
5836+ " sc %0, %1 \n" \
5837+ " beqz %0, 1b \n" \
5838+ extable \
5839+ " .set mips0 \n" \
5840+ : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (v->counter) \
5841+ : "Ir" (i)); \
5842 } else { \
5843 unsigned long flags; \
5844 \
5845 raw_local_irq_save(flags); \
5846- v->counter c_op i; \
5847+ __asm__ __volatile__( \
5848+ "2: " #asm_op " %0, %1 \n" \
5849+ extable \
5850+ : "+r" (v->counter) : "Ir" (i)); \
5851 raw_local_irq_restore(flags); \
5852 } \
5853 }
5854
5855-#define ATOMIC_OP_RETURN(op, c_op, asm_op) \
5856-static __inline__ int atomic_##op##_return(int i, atomic_t * v) \
5857+#define ATOMIC_OP(op, asm_op) __ATOMIC_OP(op, _unchecked, asm_op##u, ) \
5858+ __ATOMIC_OP(op, , asm_op, __OVERFLOW_EXTABLE)
5859+
5860+#define __ATOMIC_OP_RETURN(op, suffix, asm_op, post_op, extable) \
5861+static inline int atomic_##op##_return##suffix(int i, atomic##suffix##_t * v) \
5862 { \
5863 int result; \
5864 \
5865@@ -89,12 +143,15 @@ static __inline__ int atomic_##op##_return(int i, atomic_t * v) \
5866 int temp; \
5867 \
5868 __asm__ __volatile__( \
5869- " .set arch=r4000 \n" \
5870- "1: ll %1, %2 # atomic_" #op "_return \n" \
5871- " " #asm_op " %0, %1, %3 \n" \
5872+ " .set mips3 \n" \
5873+ "1: ll %1, %2 # atomic_" #op "_return" #suffix"\n" \
5874+ "2: " #asm_op " %0, %1, %3 \n" \
5875 " sc %0, %2 \n" \
5876 " beqzl %0, 1b \n" \
5877- " " #asm_op " %0, %1, %3 \n" \
5878+ post_op \
5879+ extable \
5880+ "4: " #asm_op " %0, %1, %3 \n" \
5881+ "5: \n" \
5882 " .set mips0 \n" \
5883 : "=&r" (result), "=&r" (temp), \
5884 "+" GCC_OFF_SMALL_ASM() (v->counter) \
5885@@ -102,26 +159,33 @@ static __inline__ int atomic_##op##_return(int i, atomic_t * v) \
5886 } else if (kernel_uses_llsc) { \
5887 int temp; \
5888 \
5889- do { \
5890- __asm__ __volatile__( \
5891- " .set "MIPS_ISA_LEVEL" \n" \
5892- " ll %1, %2 # atomic_" #op "_return \n" \
5893- " " #asm_op " %0, %1, %3 \n" \
5894- " sc %0, %2 \n" \
5895- " .set mips0 \n" \
5896- : "=&r" (result), "=&r" (temp), \
5897- "+" GCC_OFF_SMALL_ASM() (v->counter) \
5898- : "Ir" (i)); \
5899- } while (unlikely(!result)); \
5900+ __asm__ __volatile__( \
5901+ " .set "MIPS_ISA_LEVEL" \n" \
5902+ "1: ll %1, %2 # atomic_" #op "_return" #suffix "\n" \
5903+ "2: " #asm_op " %0, %1, %3 \n" \
5904+ " sc %0, %2 \n" \
5905+ post_op \
5906+ extable \
5907+ "4: " #asm_op " %0, %1, %3 \n" \
5908+ "5: \n" \
5909+ " .set mips0 \n" \
5910+ : "=&r" (result), "=&r" (temp), \
5911+ "+" GCC_OFF_SMALL_ASM() (v->counter) \
5912+ : "Ir" (i)); \
5913 \
5914 result = temp; result c_op i; \
5915 } else { \
5916 unsigned long flags; \
5917 \
5918 raw_local_irq_save(flags); \
5919- result = v->counter; \
5920- result c_op i; \
5921- v->counter = result; \
5922+ __asm__ __volatile__( \
5923+ " lw %0, %1 \n" \
5924+ "2: " #asm_op " %0, %1, %2 \n" \
5925+ " sw %0, %1 \n" \
5926+ "3: \n" \
5927+ extable \
5928+ : "=&r" (result), "+" GCC_OFF_SMALL_ASM() (v->counter) \
5929+ : "Ir" (i)); \
5930 raw_local_irq_restore(flags); \
5931 } \
5932 \
5933@@ -130,16 +194,21 @@ static __inline__ int atomic_##op##_return(int i, atomic_t * v) \
5934 return result; \
5935 }
5936
5937-#define ATOMIC_OPS(op, c_op, asm_op) \
5938- ATOMIC_OP(op, c_op, asm_op) \
5939- ATOMIC_OP_RETURN(op, c_op, asm_op)
5940+#define ATOMIC_OP_RETURN(op, asm_op) __ATOMIC_OP_RETURN(op, _unchecked, asm_op##u, , ) \
5941+ __ATOMIC_OP_RETURN(op, , asm_op, __OVERFLOW_POST, __OVERFLOW_EXTABLE)
5942
5943-ATOMIC_OPS(add, +=, addu)
5944-ATOMIC_OPS(sub, -=, subu)
5945+#define ATOMIC_OPS(op, asm_op) \
5946+ ATOMIC_OP(op, asm_op) \
5947+ ATOMIC_OP_RETURN(op, asm_op)
5948+
5949+ATOMIC_OPS(add, add)
5950+ATOMIC_OPS(sub, sub)
5951
5952 #undef ATOMIC_OPS
5953 #undef ATOMIC_OP_RETURN
5954+#undef __ATOMIC_OP_RETURN
5955 #undef ATOMIC_OP
5956+#undef __ATOMIC_OP
5957
5958 /*
5959 * atomic_sub_if_positive - conditionally subtract integer from atomic variable
5960@@ -149,7 +218,7 @@ ATOMIC_OPS(sub, -=, subu)
5961 * Atomically test @v and subtract @i if @v is greater or equal than @i.
5962 * The function returns the old value of @v minus @i.
5963 */
5964-static __inline__ int atomic_sub_if_positive(int i, atomic_t * v)
5965+static __inline__ int atomic_sub_if_positive(int i, atomic_t *v)
5966 {
5967 int result;
5968
5969@@ -159,7 +228,7 @@ static __inline__ int atomic_sub_if_positive(int i, atomic_t * v)
5970 int temp;
5971
5972 __asm__ __volatile__(
5973- " .set arch=r4000 \n"
5974+ " .set "MIPS_ISA_LEVEL" \n"
5975 "1: ll %1, %2 # atomic_sub_if_positive\n"
5976 " subu %0, %1, %3 \n"
5977 " bltz %0, 1f \n"
5978@@ -208,8 +277,26 @@ static __inline__ int atomic_sub_if_positive(int i, atomic_t * v)
5979 return result;
5980 }
5981
5982-#define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
5983-#define atomic_xchg(v, new) (xchg(&((v)->counter), (new)))
5984+static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
5985+{
5986+ return cmpxchg(&v->counter, old, new);
5987+}
5988+
5989+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old,
5990+ int new)
5991+{
5992+ return cmpxchg(&(v->counter), old, new);
5993+}
5994+
5995+static inline int atomic_xchg(atomic_t *v, int new)
5996+{
5997+ return xchg(&v->counter, new);
5998+}
5999+
6000+static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
6001+{
6002+ return xchg(&(v->counter), new);
6003+}
6004
6005 /**
6006 * __atomic_add_unless - add unless the number is a given value
6007@@ -237,6 +324,10 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
6008
6009 #define atomic_dec_return(v) atomic_sub_return(1, (v))
6010 #define atomic_inc_return(v) atomic_add_return(1, (v))
6011+static __inline__ int atomic_inc_return_unchecked(atomic_unchecked_t *v)
6012+{
6013+ return atomic_add_return_unchecked(1, v);
6014+}
6015
6016 /*
6017 * atomic_sub_and_test - subtract value from variable and test result
6018@@ -258,6 +349,10 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
6019 * other cases.
6020 */
6021 #define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
6022+static __inline__ int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
6023+{
6024+ return atomic_add_return_unchecked(1, v) == 0;
6025+}
6026
6027 /*
6028 * atomic_dec_and_test - decrement by 1 and test
6029@@ -282,6 +377,10 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
6030 * Atomically increments @v by 1.
6031 */
6032 #define atomic_inc(v) atomic_add(1, (v))
6033+static __inline__ void atomic_inc_unchecked(atomic_unchecked_t *v)
6034+{
6035+ atomic_add_unchecked(1, v);
6036+}
6037
6038 /*
6039 * atomic_dec - decrement and test
6040@@ -290,6 +389,10 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
6041 * Atomically decrements @v by 1.
6042 */
6043 #define atomic_dec(v) atomic_sub(1, (v))
6044+static __inline__ void atomic_dec_unchecked(atomic_unchecked_t *v)
6045+{
6046+ atomic_sub_unchecked(1, v);
6047+}
6048
6049 /*
6050 * atomic_add_negative - add and test if negative
6051@@ -311,54 +414,77 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
6052 * @v: pointer of type atomic64_t
6053 *
6054 */
6055-#define atomic64_read(v) ACCESS_ONCE((v)->counter)
6056+static inline long atomic64_read(const atomic64_t *v)
6057+{
6058+ return ACCESS_ONCE(v->counter);
6059+}
6060+
6061+static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
6062+{
6063+ return ACCESS_ONCE(v->counter);
6064+}
6065
6066 /*
6067 * atomic64_set - set atomic variable
6068 * @v: pointer of type atomic64_t
6069 * @i: required value
6070 */
6071-#define atomic64_set(v, i) ((v)->counter = (i))
6072+static inline void atomic64_set(atomic64_t *v, long i)
6073+{
6074+ v->counter = i;
6075+}
6076
6077-#define ATOMIC64_OP(op, c_op, asm_op) \
6078-static __inline__ void atomic64_##op(long i, atomic64_t * v) \
6079+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
6080+{
6081+ v->counter = i;
6082+}
6083+
6084+#define __ATOMIC64_OP(op, suffix, asm_op, extable) \
6085+static inline void atomic64_##op##suffix(long i, atomic64##suffix##_t * v) \
6086 { \
6087 if (kernel_uses_llsc && R10000_LLSC_WAR) { \
6088 long temp; \
6089 \
6090 __asm__ __volatile__( \
6091- " .set arch=r4000 \n" \
6092- "1: lld %0, %1 # atomic64_" #op " \n" \
6093- " " #asm_op " %0, %2 \n" \
6094+ " .set "MIPS_ISA_LEVEL" \n" \
6095+ "1: lld %0, %1 # atomic64_" #op #suffix "\n" \
6096+ "2: " #asm_op " %0, %2 \n" \
6097 " scd %0, %1 \n" \
6098 " beqzl %0, 1b \n" \
6099+ extable \
6100 " .set mips0 \n" \
6101 : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (v->counter) \
6102 : "Ir" (i)); \
6103 } else if (kernel_uses_llsc) { \
6104 long temp; \
6105 \
6106- do { \
6107- __asm__ __volatile__( \
6108- " .set "MIPS_ISA_LEVEL" \n" \
6109- " lld %0, %1 # atomic64_" #op "\n" \
6110- " " #asm_op " %0, %2 \n" \
6111- " scd %0, %1 \n" \
6112- " .set mips0 \n" \
6113- : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (v->counter) \
6114- : "Ir" (i)); \
6115- } while (unlikely(!temp)); \
6116+ __asm__ __volatile__( \
6117+ " .set "MIPS_ISA_LEVEL" \n" \
6118+ "1: lld %0, %1 # atomic64_" #op #suffix "\n" \
6119+ "2: " #asm_op " %0, %2 \n" \
6120+ " scd %0, %1 \n" \
6121+ " beqz %0, 1b \n" \
6122+ extable \
6123+ " .set mips0 \n" \
6124+ : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (v->counter) \
6125+ : "Ir" (i)); \
6126 } else { \
6127 unsigned long flags; \
6128 \
6129 raw_local_irq_save(flags); \
6130- v->counter c_op i; \
6131+ __asm__ __volatile__( \
6132+ "2: " #asm_op " %0, %1 \n" \
6133+ extable \
6134+ : "+" GCC_OFF_SMALL_ASM() (v->counter) : "Ir" (i)); \
6135 raw_local_irq_restore(flags); \
6136 } \
6137 }
6138
6139-#define ATOMIC64_OP_RETURN(op, c_op, asm_op) \
6140-static __inline__ long atomic64_##op##_return(long i, atomic64_t * v) \
6141+#define ATOMIC64_OP(op, asm_op) __ATOMIC64_OP(op, _unchecked, asm_op##u, ) \
6142+ __ATOMIC64_OP(op, , asm_op, __OVERFLOW_EXTABLE)
6143+
6144+#define __ATOMIC64_OP_RETURN(op, suffix, asm_op, post_op, extable) \
6145+static inline long atomic64_##op##_return##suffix(long i, atomic64##suffix##_t * v)\
6146 { \
6147 long result; \
6148 \
6149@@ -368,12 +494,15 @@ static __inline__ long atomic64_##op##_return(long i, atomic64_t * v) \
6150 long temp; \
6151 \
6152 __asm__ __volatile__( \
6153- " .set arch=r4000 \n" \
6154+ " .set mips3 \n" \
6155 "1: lld %1, %2 # atomic64_" #op "_return\n" \
6156- " " #asm_op " %0, %1, %3 \n" \
6157+ "2: " #asm_op " %0, %1, %3 \n" \
6158 " scd %0, %2 \n" \
6159 " beqzl %0, 1b \n" \
6160- " " #asm_op " %0, %1, %3 \n" \
6161+ post_op \
6162+ extable \
6163+ "4: " #asm_op " %0, %1, %3 \n" \
6164+ "5: \n" \
6165 " .set mips0 \n" \
6166 : "=&r" (result), "=&r" (temp), \
6167 "+" GCC_OFF_SMALL_ASM() (v->counter) \
6168@@ -381,27 +510,35 @@ static __inline__ long atomic64_##op##_return(long i, atomic64_t * v) \
6169 } else if (kernel_uses_llsc) { \
6170 long temp; \
6171 \
6172- do { \
6173- __asm__ __volatile__( \
6174- " .set "MIPS_ISA_LEVEL" \n" \
6175- " lld %1, %2 # atomic64_" #op "_return\n" \
6176- " " #asm_op " %0, %1, %3 \n" \
6177- " scd %0, %2 \n" \
6178- " .set mips0 \n" \
6179- : "=&r" (result), "=&r" (temp), \
6180- "=" GCC_OFF_SMALL_ASM() (v->counter) \
6181- : "Ir" (i), GCC_OFF_SMALL_ASM() (v->counter) \
6182- : "memory"); \
6183- } while (unlikely(!result)); \
6184+ __asm__ __volatile__( \
6185+ " .set "MIPS_ISA_LEVEL" \n" \
6186+ "1: lld %1, %2 # atomic64_" #op "_return" #suffix "\n"\
6187+ "2: " #asm_op " %0, %1, %3 \n" \
6188+ " scd %0, %2 \n" \
6189+ " beqz %0, 1b \n" \
6190+ post_op \
6191+ extable \
6192+ "4: " #asm_op " %0, %1, %3 \n" \
6193+ "5: \n" \
6194+ " .set mips0 \n" \
6195+ : "=&r" (result), "=&r" (temp), \
6196+ "=" GCC_OFF_SMALL_ASM() (v->counter) \
6197+ : "Ir" (i), GCC_OFF_SMALL_ASM() (v->counter) \
6198+ : "memory"); \
6199 \
6200 result = temp; result c_op i; \
6201 } else { \
6202 unsigned long flags; \
6203 \
6204 raw_local_irq_save(flags); \
6205- result = v->counter; \
6206- result c_op i; \
6207- v->counter = result; \
6208+ __asm__ __volatile__( \
6209+ " ld %0, %1 \n" \
6210+ "2: " #asm_op " %0, %1, %2 \n" \
6211+ " sd %0, %1 \n" \
6212+ "3: \n" \
6213+ extable \
6214+ : "=&r" (result), "+" GCC_OFF_SMALL_ASM() (v->counter) \
6215+ : "Ir" (i)); \
6216 raw_local_irq_restore(flags); \
6217 } \
6218 \
6219@@ -410,16 +547,23 @@ static __inline__ long atomic64_##op##_return(long i, atomic64_t * v) \
6220 return result; \
6221 }
6222
6223-#define ATOMIC64_OPS(op, c_op, asm_op) \
6224- ATOMIC64_OP(op, c_op, asm_op) \
6225- ATOMIC64_OP_RETURN(op, c_op, asm_op)
6226+#define ATOMIC64_OP_RETURN(op, asm_op) __ATOMIC64_OP_RETURN(op, _unchecked, asm_op##u, , ) \
6227+ __ATOMIC64_OP_RETURN(op, , asm_op, __OVERFLOW_POST, __OVERFLOW_EXTABLE)
6228
6229-ATOMIC64_OPS(add, +=, daddu)
6230-ATOMIC64_OPS(sub, -=, dsubu)
6231+#define ATOMIC64_OPS(op, asm_op) \
6232+ ATOMIC64_OP(op, asm_op) \
6233+ ATOMIC64_OP_RETURN(op, asm_op)
6234+
6235+ATOMIC64_OPS(add, dadd)
6236+ATOMIC64_OPS(sub, dsub)
6237
6238 #undef ATOMIC64_OPS
6239 #undef ATOMIC64_OP_RETURN
6240+#undef __ATOMIC64_OP_RETURN
6241 #undef ATOMIC64_OP
6242+#undef __ATOMIC64_OP
6243+#undef __OVERFLOW_EXTABLE
6244+#undef __OVERFLOW_POST
6245
6246 /*
6247 * atomic64_sub_if_positive - conditionally subtract integer from atomic
6248@@ -430,7 +574,7 @@ ATOMIC64_OPS(sub, -=, dsubu)
6249 * Atomically test @v and subtract @i if @v is greater or equal than @i.
6250 * The function returns the old value of @v minus @i.
6251 */
6252-static __inline__ long atomic64_sub_if_positive(long i, atomic64_t * v)
6253+static __inline__ long atomic64_sub_if_positive(long i, atomic64_t *v)
6254 {
6255 long result;
6256
6257@@ -440,7 +584,7 @@ static __inline__ long atomic64_sub_if_positive(long i, atomic64_t * v)
6258 long temp;
6259
6260 __asm__ __volatile__(
6261- " .set arch=r4000 \n"
6262+ " .set "MIPS_ISA_LEVEL" \n"
6263 "1: lld %1, %2 # atomic64_sub_if_positive\n"
6264 " dsubu %0, %1, %3 \n"
6265 " bltz %0, 1f \n"
6266@@ -489,9 +633,26 @@ static __inline__ long atomic64_sub_if_positive(long i, atomic64_t * v)
6267 return result;
6268 }
6269
6270-#define atomic64_cmpxchg(v, o, n) \
6271- ((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n)))
6272-#define atomic64_xchg(v, new) (xchg(&((v)->counter), (new)))
6273+static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
6274+{
6275+ return cmpxchg(&v->counter, old, new);
6276+}
6277+
6278+static inline long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long old,
6279+ long new)
6280+{
6281+ return cmpxchg(&(v->counter), old, new);
6282+}
6283+
6284+static inline long atomic64_xchg(atomic64_t *v, long new)
6285+{
6286+ return xchg(&v->counter, new);
6287+}
6288+
6289+static inline long atomic64_xchg_unchecked(atomic64_unchecked_t *v, long new)
6290+{
6291+ return xchg(&(v->counter), new);
6292+}
6293
6294 /**
6295 * atomic64_add_unless - add unless the number is a given value
6296@@ -521,6 +682,7 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
6297
6298 #define atomic64_dec_return(v) atomic64_sub_return(1, (v))
6299 #define atomic64_inc_return(v) atomic64_add_return(1, (v))
6300+#define atomic64_inc_return_unchecked(v) atomic64_add_return_unchecked(1, (v))
6301
6302 /*
6303 * atomic64_sub_and_test - subtract value from variable and test result
6304@@ -542,6 +704,7 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
6305 * other cases.
6306 */
6307 #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
6308+#define atomic64_inc_and_test_unchecked(v) atomic64_add_return_unchecked(1, (v)) == 0)
6309
6310 /*
6311 * atomic64_dec_and_test - decrement by 1 and test
6312@@ -566,6 +729,7 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
6313 * Atomically increments @v by 1.
6314 */
6315 #define atomic64_inc(v) atomic64_add(1, (v))
6316+#define atomic64_inc_unchecked(v) atomic64_add_unchecked(1, (v))
6317
6318 /*
6319 * atomic64_dec - decrement and test
6320@@ -574,6 +738,7 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
6321 * Atomically decrements @v by 1.
6322 */
6323 #define atomic64_dec(v) atomic64_sub(1, (v))
6324+#define atomic64_dec_unchecked(v) atomic64_sub_unchecked(1, (v))
6325
6326 /*
6327 * atomic64_add_negative - add and test if negative
6328diff --git a/arch/mips/include/asm/barrier.h b/arch/mips/include/asm/barrier.h
6329index 2b8bbbc..4556df6 100644
6330--- a/arch/mips/include/asm/barrier.h
6331+++ b/arch/mips/include/asm/barrier.h
6332@@ -133,7 +133,7 @@
6333 do { \
6334 compiletime_assert_atomic_type(*p); \
6335 smp_mb(); \
6336- ACCESS_ONCE(*p) = (v); \
6337+ ACCESS_ONCE_RW(*p) = (v); \
6338 } while (0)
6339
6340 #define smp_load_acquire(p) \
6341diff --git a/arch/mips/include/asm/cache.h b/arch/mips/include/asm/cache.h
6342index b4db69f..8f3b093 100644
6343--- a/arch/mips/include/asm/cache.h
6344+++ b/arch/mips/include/asm/cache.h
6345@@ -9,10 +9,11 @@
6346 #ifndef _ASM_CACHE_H
6347 #define _ASM_CACHE_H
6348
6349+#include <linux/const.h>
6350 #include <kmalloc.h>
6351
6352 #define L1_CACHE_SHIFT CONFIG_MIPS_L1_CACHE_SHIFT
6353-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
6354+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
6355
6356 #define SMP_CACHE_SHIFT L1_CACHE_SHIFT
6357 #define SMP_CACHE_BYTES L1_CACHE_BYTES
6358diff --git a/arch/mips/include/asm/elf.h b/arch/mips/include/asm/elf.h
6359index 694925a..990fa62 100644
6360--- a/arch/mips/include/asm/elf.h
6361+++ b/arch/mips/include/asm/elf.h
6362@@ -410,15 +410,18 @@ extern const char *__elf_platform;
6363 #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
6364 #endif
6365
6366+#ifdef CONFIG_PAX_ASLR
6367+#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
6368+
6369+#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
6370+#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
6371+#endif
6372+
6373 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
6374 struct linux_binprm;
6375 extern int arch_setup_additional_pages(struct linux_binprm *bprm,
6376 int uses_interp);
6377
6378-struct mm_struct;
6379-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
6380-#define arch_randomize_brk arch_randomize_brk
6381-
6382 struct arch_elf_state {
6383 int fp_abi;
6384 int interp_fp_abi;
6385diff --git a/arch/mips/include/asm/exec.h b/arch/mips/include/asm/exec.h
6386index c1f6afa..38cc6e9 100644
6387--- a/arch/mips/include/asm/exec.h
6388+++ b/arch/mips/include/asm/exec.h
6389@@ -12,6 +12,6 @@
6390 #ifndef _ASM_EXEC_H
6391 #define _ASM_EXEC_H
6392
6393-extern unsigned long arch_align_stack(unsigned long sp);
6394+#define arch_align_stack(x) ((x) & ~0xfUL)
6395
6396 #endif /* _ASM_EXEC_H */
6397diff --git a/arch/mips/include/asm/hw_irq.h b/arch/mips/include/asm/hw_irq.h
6398index 9e8ef59..1139d6b 100644
6399--- a/arch/mips/include/asm/hw_irq.h
6400+++ b/arch/mips/include/asm/hw_irq.h
6401@@ -10,7 +10,7 @@
6402
6403 #include <linux/atomic.h>
6404
6405-extern atomic_t irq_err_count;
6406+extern atomic_unchecked_t irq_err_count;
6407
6408 /*
6409 * interrupt-retrigger: NOP for now. This may not be appropriate for all
6410diff --git a/arch/mips/include/asm/local.h b/arch/mips/include/asm/local.h
6411index 8feaed6..1bd8a64 100644
6412--- a/arch/mips/include/asm/local.h
6413+++ b/arch/mips/include/asm/local.h
6414@@ -13,15 +13,25 @@ typedef struct
6415 atomic_long_t a;
6416 } local_t;
6417
6418+typedef struct {
6419+ atomic_long_unchecked_t a;
6420+} local_unchecked_t;
6421+
6422 #define LOCAL_INIT(i) { ATOMIC_LONG_INIT(i) }
6423
6424 #define local_read(l) atomic_long_read(&(l)->a)
6425+#define local_read_unchecked(l) atomic_long_read_unchecked(&(l)->a)
6426 #define local_set(l, i) atomic_long_set(&(l)->a, (i))
6427+#define local_set_unchecked(l, i) atomic_long_set_unchecked(&(l)->a, (i))
6428
6429 #define local_add(i, l) atomic_long_add((i), (&(l)->a))
6430+#define local_add_unchecked(i, l) atomic_long_add_unchecked((i), (&(l)->a))
6431 #define local_sub(i, l) atomic_long_sub((i), (&(l)->a))
6432+#define local_sub_unchecked(i, l) atomic_long_sub_unchecked((i), (&(l)->a))
6433 #define local_inc(l) atomic_long_inc(&(l)->a)
6434+#define local_inc_unchecked(l) atomic_long_inc_unchecked(&(l)->a)
6435 #define local_dec(l) atomic_long_dec(&(l)->a)
6436+#define local_dec_unchecked(l) atomic_long_dec_unchecked(&(l)->a)
6437
6438 /*
6439 * Same as above, but return the result value
6440@@ -71,6 +81,51 @@ static __inline__ long local_add_return(long i, local_t * l)
6441 return result;
6442 }
6443
6444+static __inline__ long local_add_return_unchecked(long i, local_unchecked_t * l)
6445+{
6446+ unsigned long result;
6447+
6448+ if (kernel_uses_llsc && R10000_LLSC_WAR) {
6449+ unsigned long temp;
6450+
6451+ __asm__ __volatile__(
6452+ " .set mips3 \n"
6453+ "1:" __LL "%1, %2 # local_add_return \n"
6454+ " addu %0, %1, %3 \n"
6455+ __SC "%0, %2 \n"
6456+ " beqzl %0, 1b \n"
6457+ " addu %0, %1, %3 \n"
6458+ " .set mips0 \n"
6459+ : "=&r" (result), "=&r" (temp), "=m" (l->a.counter)
6460+ : "Ir" (i), "m" (l->a.counter)
6461+ : "memory");
6462+ } else if (kernel_uses_llsc) {
6463+ unsigned long temp;
6464+
6465+ __asm__ __volatile__(
6466+ " .set mips3 \n"
6467+ "1:" __LL "%1, %2 # local_add_return \n"
6468+ " addu %0, %1, %3 \n"
6469+ __SC "%0, %2 \n"
6470+ " beqz %0, 1b \n"
6471+ " addu %0, %1, %3 \n"
6472+ " .set mips0 \n"
6473+ : "=&r" (result), "=&r" (temp), "=m" (l->a.counter)
6474+ : "Ir" (i), "m" (l->a.counter)
6475+ : "memory");
6476+ } else {
6477+ unsigned long flags;
6478+
6479+ local_irq_save(flags);
6480+ result = l->a.counter;
6481+ result += i;
6482+ l->a.counter = result;
6483+ local_irq_restore(flags);
6484+ }
6485+
6486+ return result;
6487+}
6488+
6489 static __inline__ long local_sub_return(long i, local_t * l)
6490 {
6491 unsigned long result;
6492@@ -118,6 +173,8 @@ static __inline__ long local_sub_return(long i, local_t * l)
6493
6494 #define local_cmpxchg(l, o, n) \
6495 ((long)cmpxchg_local(&((l)->a.counter), (o), (n)))
6496+#define local_cmpxchg_unchecked(l, o, n) \
6497+ ((long)cmpxchg_local(&((l)->a.counter), (o), (n)))
6498 #define local_xchg(l, n) (atomic_long_xchg((&(l)->a), (n)))
6499
6500 /**
6501diff --git a/arch/mips/include/asm/page.h b/arch/mips/include/asm/page.h
6502index 154b70a..426ae3d 100644
6503--- a/arch/mips/include/asm/page.h
6504+++ b/arch/mips/include/asm/page.h
6505@@ -120,7 +120,7 @@ extern void copy_user_highpage(struct page *to, struct page *from,
6506 #ifdef CONFIG_CPU_MIPS32
6507 typedef struct { unsigned long pte_low, pte_high; } pte_t;
6508 #define pte_val(x) ((x).pte_low | ((unsigned long long)(x).pte_high << 32))
6509- #define __pte(x) ({ pte_t __pte = {(x), ((unsigned long long)(x)) >> 32}; __pte; })
6510+ #define __pte(x) ({ pte_t __pte = {(x), (x) >> 32}; __pte; })
6511 #else
6512 typedef struct { unsigned long long pte; } pte_t;
6513 #define pte_val(x) ((x).pte)
6514diff --git a/arch/mips/include/asm/pgalloc.h b/arch/mips/include/asm/pgalloc.h
6515index b336037..5b874cc 100644
6516--- a/arch/mips/include/asm/pgalloc.h
6517+++ b/arch/mips/include/asm/pgalloc.h
6518@@ -37,6 +37,11 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
6519 {
6520 set_pud(pud, __pud((unsigned long)pmd));
6521 }
6522+
6523+static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
6524+{
6525+ pud_populate(mm, pud, pmd);
6526+}
6527 #endif
6528
6529 /*
6530diff --git a/arch/mips/include/asm/pgtable.h b/arch/mips/include/asm/pgtable.h
6531index f8f809f..b5f3fa4 100644
6532--- a/arch/mips/include/asm/pgtable.h
6533+++ b/arch/mips/include/asm/pgtable.h
6534@@ -20,6 +20,9 @@
6535 #include <asm/io.h>
6536 #include <asm/pgtable-bits.h>
6537
6538+#define ktla_ktva(addr) (addr)
6539+#define ktva_ktla(addr) (addr)
6540+
6541 struct mm_struct;
6542 struct vm_area_struct;
6543
6544diff --git a/arch/mips/include/asm/thread_info.h b/arch/mips/include/asm/thread_info.h
6545index 55ed660..3dc9422 100644
6546--- a/arch/mips/include/asm/thread_info.h
6547+++ b/arch/mips/include/asm/thread_info.h
6548@@ -102,6 +102,9 @@ static inline struct thread_info *current_thread_info(void)
6549 #define TIF_SECCOMP 4 /* secure computing */
6550 #define TIF_NOTIFY_RESUME 5 /* callback before returning to user */
6551 #define TIF_RESTORE_SIGMASK 9 /* restore signal mask in do_signal() */
6552+/* li takes a 32bit immediate */
6553+#define TIF_GRSEC_SETXID 10 /* update credentials on syscall entry/exit */
6554+
6555 #define TIF_USEDFPU 16 /* FPU was used by this task this quantum (SMP) */
6556 #define TIF_MEMDIE 18 /* is terminating due to OOM killer */
6557 #define TIF_NOHZ 19 /* in adaptive nohz mode */
6558@@ -137,14 +140,16 @@ static inline struct thread_info *current_thread_info(void)
6559 #define _TIF_USEDMSA (1<<TIF_USEDMSA)
6560 #define _TIF_MSA_CTX_LIVE (1<<TIF_MSA_CTX_LIVE)
6561 #define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT)
6562+#define _TIF_GRSEC_SETXID (1<<TIF_GRSEC_SETXID)
6563
6564 #define _TIF_WORK_SYSCALL_ENTRY (_TIF_NOHZ | _TIF_SYSCALL_TRACE | \
6565 _TIF_SYSCALL_AUDIT | \
6566- _TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP)
6567+ _TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP | \
6568+ _TIF_GRSEC_SETXID)
6569
6570 /* work to do in syscall_trace_leave() */
6571 #define _TIF_WORK_SYSCALL_EXIT (_TIF_NOHZ | _TIF_SYSCALL_TRACE | \
6572- _TIF_SYSCALL_AUDIT | _TIF_SYSCALL_TRACEPOINT)
6573+ _TIF_SYSCALL_AUDIT | _TIF_SYSCALL_TRACEPOINT | _TIF_GRSEC_SETXID)
6574
6575 /* work to do on interrupt/exception return */
6576 #define _TIF_WORK_MASK \
6577@@ -152,7 +157,7 @@ static inline struct thread_info *current_thread_info(void)
6578 /* work to do on any return to u-space */
6579 #define _TIF_ALLWORK_MASK (_TIF_NOHZ | _TIF_WORK_MASK | \
6580 _TIF_WORK_SYSCALL_EXIT | \
6581- _TIF_SYSCALL_TRACEPOINT)
6582+ _TIF_SYSCALL_TRACEPOINT | _TIF_GRSEC_SETXID)
6583
6584 /*
6585 * We stash processor id into a COP0 register to retrieve it fast
6586diff --git a/arch/mips/include/asm/uaccess.h b/arch/mips/include/asm/uaccess.h
6587index bf8b324..cec5705 100644
6588--- a/arch/mips/include/asm/uaccess.h
6589+++ b/arch/mips/include/asm/uaccess.h
6590@@ -130,6 +130,7 @@ extern u64 __ua_limit;
6591 __ok == 0; \
6592 })
6593
6594+#define access_ok_noprefault(type, addr, size) access_ok((type), (addr), (size))
6595 #define access_ok(type, addr, size) \
6596 likely(__access_ok((addr), (size), __access_mask))
6597
6598diff --git a/arch/mips/kernel/binfmt_elfn32.c b/arch/mips/kernel/binfmt_elfn32.c
6599index 1188e00..41cf144 100644
6600--- a/arch/mips/kernel/binfmt_elfn32.c
6601+++ b/arch/mips/kernel/binfmt_elfn32.c
6602@@ -50,6 +50,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
6603 #undef ELF_ET_DYN_BASE
6604 #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
6605
6606+#ifdef CONFIG_PAX_ASLR
6607+#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
6608+
6609+#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
6610+#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
6611+#endif
6612+
6613 #include <asm/processor.h>
6614 #include <linux/module.h>
6615 #include <linux/elfcore.h>
6616diff --git a/arch/mips/kernel/binfmt_elfo32.c b/arch/mips/kernel/binfmt_elfo32.c
6617index 9287678..f870e47 100644
6618--- a/arch/mips/kernel/binfmt_elfo32.c
6619+++ b/arch/mips/kernel/binfmt_elfo32.c
6620@@ -70,6 +70,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
6621 #undef ELF_ET_DYN_BASE
6622 #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
6623
6624+#ifdef CONFIG_PAX_ASLR
6625+#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
6626+
6627+#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
6628+#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
6629+#endif
6630+
6631 #include <asm/processor.h>
6632
6633 #include <linux/module.h>
6634diff --git a/arch/mips/kernel/i8259.c b/arch/mips/kernel/i8259.c
6635index a74ec3a..4f06f18 100644
6636--- a/arch/mips/kernel/i8259.c
6637+++ b/arch/mips/kernel/i8259.c
6638@@ -202,7 +202,7 @@ spurious_8259A_irq:
6639 printk(KERN_DEBUG "spurious 8259A interrupt: IRQ%d.\n", irq);
6640 spurious_irq_mask |= irqmask;
6641 }
6642- atomic_inc(&irq_err_count);
6643+ atomic_inc_unchecked(&irq_err_count);
6644 /*
6645 * Theoretically we do not have to handle this IRQ,
6646 * but in Linux this does not cause problems and is
6647diff --git a/arch/mips/kernel/irq-gt641xx.c b/arch/mips/kernel/irq-gt641xx.c
6648index 44a1f79..2bd6aa3 100644
6649--- a/arch/mips/kernel/irq-gt641xx.c
6650+++ b/arch/mips/kernel/irq-gt641xx.c
6651@@ -110,7 +110,7 @@ void gt641xx_irq_dispatch(void)
6652 }
6653 }
6654
6655- atomic_inc(&irq_err_count);
6656+ atomic_inc_unchecked(&irq_err_count);
6657 }
6658
6659 void __init gt641xx_irq_init(void)
6660diff --git a/arch/mips/kernel/irq.c b/arch/mips/kernel/irq.c
6661index d2bfbc2..a8eacd2 100644
6662--- a/arch/mips/kernel/irq.c
6663+++ b/arch/mips/kernel/irq.c
6664@@ -76,17 +76,17 @@ void ack_bad_irq(unsigned int irq)
6665 printk("unexpected IRQ # %d\n", irq);
6666 }
6667
6668-atomic_t irq_err_count;
6669+atomic_unchecked_t irq_err_count;
6670
6671 int arch_show_interrupts(struct seq_file *p, int prec)
6672 {
6673- seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count));
6674+ seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read_unchecked(&irq_err_count));
6675 return 0;
6676 }
6677
6678 asmlinkage void spurious_interrupt(void)
6679 {
6680- atomic_inc(&irq_err_count);
6681+ atomic_inc_unchecked(&irq_err_count);
6682 }
6683
6684 void __init init_IRQ(void)
6685@@ -109,7 +109,10 @@ void __init init_IRQ(void)
6686 #endif
6687 }
6688
6689+
6690 #ifdef DEBUG_STACKOVERFLOW
6691+extern void gr_handle_kernel_exploit(void);
6692+
6693 static inline void check_stack_overflow(void)
6694 {
6695 unsigned long sp;
6696@@ -125,6 +128,7 @@ static inline void check_stack_overflow(void)
6697 printk("do_IRQ: stack overflow: %ld\n",
6698 sp - sizeof(struct thread_info));
6699 dump_stack();
6700+ gr_handle_kernel_exploit();
6701 }
6702 }
6703 #else
6704diff --git a/arch/mips/kernel/pm-cps.c b/arch/mips/kernel/pm-cps.c
6705index 0614717..002fa43 100644
6706--- a/arch/mips/kernel/pm-cps.c
6707+++ b/arch/mips/kernel/pm-cps.c
6708@@ -172,7 +172,7 @@ int cps_pm_enter_state(enum cps_pm_state state)
6709 nc_core_ready_count = nc_addr;
6710
6711 /* Ensure ready_count is zero-initialised before the assembly runs */
6712- ACCESS_ONCE(*nc_core_ready_count) = 0;
6713+ ACCESS_ONCE_RW(*nc_core_ready_count) = 0;
6714 coupled_barrier(&per_cpu(pm_barrier, core), online);
6715
6716 /* Run the generated entry code */
6717diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
6718index bf85cc1..b365c61 100644
6719--- a/arch/mips/kernel/process.c
6720+++ b/arch/mips/kernel/process.c
6721@@ -535,18 +535,6 @@ out:
6722 return pc;
6723 }
6724
6725-/*
6726- * Don't forget that the stack pointer must be aligned on a 8 bytes
6727- * boundary for 32-bits ABI and 16 bytes for 64-bits ABI.
6728- */
6729-unsigned long arch_align_stack(unsigned long sp)
6730-{
6731- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
6732- sp -= get_random_int() & ~PAGE_MASK;
6733-
6734- return sp & ALMASK;
6735-}
6736-
6737 static void arch_dump_stack(void *info)
6738 {
6739 struct pt_regs *regs;
6740diff --git a/arch/mips/kernel/ptrace.c b/arch/mips/kernel/ptrace.c
6741index 5104528..950bbdc 100644
6742--- a/arch/mips/kernel/ptrace.c
6743+++ b/arch/mips/kernel/ptrace.c
6744@@ -761,6 +761,10 @@ long arch_ptrace(struct task_struct *child, long request,
6745 return ret;
6746 }
6747
6748+#ifdef CONFIG_GRKERNSEC_SETXID
6749+extern void gr_delayed_cred_worker(void);
6750+#endif
6751+
6752 /*
6753 * Notification of system call entry/exit
6754 * - triggered by current->work.syscall_trace
6755@@ -779,6 +783,11 @@ asmlinkage long syscall_trace_enter(struct pt_regs *regs, long syscall)
6756 tracehook_report_syscall_entry(regs))
6757 ret = -1;
6758
6759+#ifdef CONFIG_GRKERNSEC_SETXID
6760+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
6761+ gr_delayed_cred_worker();
6762+#endif
6763+
6764 if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
6765 trace_sys_enter(regs, regs->regs[2]);
6766
6767diff --git a/arch/mips/kernel/reset.c b/arch/mips/kernel/reset.c
6768index 07fc524..b9d7f28 100644
6769--- a/arch/mips/kernel/reset.c
6770+++ b/arch/mips/kernel/reset.c
6771@@ -13,6 +13,7 @@
6772 #include <linux/reboot.h>
6773
6774 #include <asm/reboot.h>
6775+#include <asm/bug.h>
6776
6777 /*
6778 * Urgs ... Too many MIPS machines to handle this in a generic way.
6779@@ -29,16 +30,19 @@ void machine_restart(char *command)
6780 {
6781 if (_machine_restart)
6782 _machine_restart(command);
6783+ BUG();
6784 }
6785
6786 void machine_halt(void)
6787 {
6788 if (_machine_halt)
6789 _machine_halt();
6790+ BUG();
6791 }
6792
6793 void machine_power_off(void)
6794 {
6795 if (pm_power_off)
6796 pm_power_off();
6797+ BUG();
6798 }
6799diff --git a/arch/mips/kernel/sync-r4k.c b/arch/mips/kernel/sync-r4k.c
6800index 2242bdd..b284048 100644
6801--- a/arch/mips/kernel/sync-r4k.c
6802+++ b/arch/mips/kernel/sync-r4k.c
6803@@ -18,8 +18,8 @@
6804 #include <asm/mipsregs.h>
6805
6806 static atomic_t count_start_flag = ATOMIC_INIT(0);
6807-static atomic_t count_count_start = ATOMIC_INIT(0);
6808-static atomic_t count_count_stop = ATOMIC_INIT(0);
6809+static atomic_unchecked_t count_count_start = ATOMIC_INIT(0);
6810+static atomic_unchecked_t count_count_stop = ATOMIC_INIT(0);
6811 static atomic_t count_reference = ATOMIC_INIT(0);
6812
6813 #define COUNTON 100
6814@@ -58,13 +58,13 @@ void synchronise_count_master(int cpu)
6815
6816 for (i = 0; i < NR_LOOPS; i++) {
6817 /* slaves loop on '!= 2' */
6818- while (atomic_read(&count_count_start) != 1)
6819+ while (atomic_read_unchecked(&count_count_start) != 1)
6820 mb();
6821- atomic_set(&count_count_stop, 0);
6822+ atomic_set_unchecked(&count_count_stop, 0);
6823 smp_wmb();
6824
6825 /* this lets the slaves write their count register */
6826- atomic_inc(&count_count_start);
6827+ atomic_inc_unchecked(&count_count_start);
6828
6829 /*
6830 * Everyone initialises count in the last loop:
6831@@ -75,11 +75,11 @@ void synchronise_count_master(int cpu)
6832 /*
6833 * Wait for all slaves to leave the synchronization point:
6834 */
6835- while (atomic_read(&count_count_stop) != 1)
6836+ while (atomic_read_unchecked(&count_count_stop) != 1)
6837 mb();
6838- atomic_set(&count_count_start, 0);
6839+ atomic_set_unchecked(&count_count_start, 0);
6840 smp_wmb();
6841- atomic_inc(&count_count_stop);
6842+ atomic_inc_unchecked(&count_count_stop);
6843 }
6844 /* Arrange for an interrupt in a short while */
6845 write_c0_compare(read_c0_count() + COUNTON);
6846@@ -112,8 +112,8 @@ void synchronise_count_slave(int cpu)
6847 initcount = atomic_read(&count_reference);
6848
6849 for (i = 0; i < NR_LOOPS; i++) {
6850- atomic_inc(&count_count_start);
6851- while (atomic_read(&count_count_start) != 2)
6852+ atomic_inc_unchecked(&count_count_start);
6853+ while (atomic_read_unchecked(&count_count_start) != 2)
6854 mb();
6855
6856 /*
6857@@ -122,8 +122,8 @@ void synchronise_count_slave(int cpu)
6858 if (i == NR_LOOPS-1)
6859 write_c0_count(initcount);
6860
6861- atomic_inc(&count_count_stop);
6862- while (atomic_read(&count_count_stop) != 2)
6863+ atomic_inc_unchecked(&count_count_stop);
6864+ while (atomic_read_unchecked(&count_count_stop) != 2)
6865 mb();
6866 }
6867 /* Arrange for an interrupt in a short while */
6868diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
6869index 33984c0..666a96d 100644
6870--- a/arch/mips/kernel/traps.c
6871+++ b/arch/mips/kernel/traps.c
6872@@ -689,7 +689,18 @@ asmlinkage void do_ov(struct pt_regs *regs)
6873 siginfo_t info;
6874
6875 prev_state = exception_enter();
6876- die_if_kernel("Integer overflow", regs);
6877+ if (unlikely(!user_mode(regs))) {
6878+
6879+#ifdef CONFIG_PAX_REFCOUNT
6880+ if (fixup_exception(regs)) {
6881+ pax_report_refcount_overflow(regs);
6882+ exception_exit(prev_state);
6883+ return;
6884+ }
6885+#endif
6886+
6887+ die("Integer overflow", regs);
6888+ }
6889
6890 info.si_code = FPE_INTOVF;
6891 info.si_signo = SIGFPE;
6892diff --git a/arch/mips/kvm/mips.c b/arch/mips/kvm/mips.c
6893index f5e7dda..47198ec 100644
6894--- a/arch/mips/kvm/mips.c
6895+++ b/arch/mips/kvm/mips.c
6896@@ -816,7 +816,7 @@ long kvm_arch_vm_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg)
6897 return r;
6898 }
6899
6900-int kvm_arch_init(void *opaque)
6901+int kvm_arch_init(const void *opaque)
6902 {
6903 if (kvm_mips_callbacks) {
6904 kvm_err("kvm: module already exists\n");
6905diff --git a/arch/mips/mm/fault.c b/arch/mips/mm/fault.c
6906index 7ff8637..6004edb 100644
6907--- a/arch/mips/mm/fault.c
6908+++ b/arch/mips/mm/fault.c
6909@@ -31,6 +31,23 @@
6910
6911 int show_unhandled_signals = 1;
6912
6913+#ifdef CONFIG_PAX_PAGEEXEC
6914+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
6915+{
6916+ unsigned long i;
6917+
6918+ printk(KERN_ERR "PAX: bytes at PC: ");
6919+ for (i = 0; i < 5; i++) {
6920+ unsigned int c;
6921+ if (get_user(c, (unsigned int *)pc+i))
6922+ printk(KERN_CONT "???????? ");
6923+ else
6924+ printk(KERN_CONT "%08x ", c);
6925+ }
6926+ printk("\n");
6927+}
6928+#endif
6929+
6930 /*
6931 * This routine handles page faults. It determines the address,
6932 * and the problem, and then passes it off to one of the appropriate
6933@@ -206,6 +223,14 @@ bad_area:
6934 bad_area_nosemaphore:
6935 /* User mode accesses just cause a SIGSEGV */
6936 if (user_mode(regs)) {
6937+
6938+#ifdef CONFIG_PAX_PAGEEXEC
6939+ if (cpu_has_rixi && (mm->pax_flags & MF_PAX_PAGEEXEC) && !write && address == instruction_pointer(regs)) {
6940+ pax_report_fault(regs, (void *)address, (void *)user_stack_pointer(regs));
6941+ do_group_exit(SIGKILL);
6942+ }
6943+#endif
6944+
6945 tsk->thread.cp0_badvaddr = address;
6946 tsk->thread.error_code = write;
6947 if (show_unhandled_signals &&
6948diff --git a/arch/mips/mm/mmap.c b/arch/mips/mm/mmap.c
6949index f1baadd..5472dca 100644
6950--- a/arch/mips/mm/mmap.c
6951+++ b/arch/mips/mm/mmap.c
6952@@ -59,6 +59,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
6953 struct vm_area_struct *vma;
6954 unsigned long addr = addr0;
6955 int do_color_align;
6956+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
6957 struct vm_unmapped_area_info info;
6958
6959 if (unlikely(len > TASK_SIZE))
6960@@ -84,6 +85,11 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
6961 do_color_align = 1;
6962
6963 /* requesting a specific address */
6964+
6965+#ifdef CONFIG_PAX_RANDMMAP
6966+ if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
6967+#endif
6968+
6969 if (addr) {
6970 if (do_color_align)
6971 addr = COLOUR_ALIGN(addr, pgoff);
6972@@ -91,14 +97,14 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
6973 addr = PAGE_ALIGN(addr);
6974
6975 vma = find_vma(mm, addr);
6976- if (TASK_SIZE - len >= addr &&
6977- (!vma || addr + len <= vma->vm_start))
6978+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
6979 return addr;
6980 }
6981
6982 info.length = len;
6983 info.align_mask = do_color_align ? (PAGE_MASK & shm_align_mask) : 0;
6984 info.align_offset = pgoff << PAGE_SHIFT;
6985+ info.threadstack_offset = offset;
6986
6987 if (dir == DOWN) {
6988 info.flags = VM_UNMAPPED_AREA_TOPDOWN;
6989@@ -146,6 +152,10 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
6990 {
6991 unsigned long random_factor = 0UL;
6992
6993+#ifdef CONFIG_PAX_RANDMMAP
6994+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
6995+#endif
6996+
6997 if (current->flags & PF_RANDOMIZE) {
6998 random_factor = get_random_int();
6999 random_factor = random_factor << PAGE_SHIFT;
7000@@ -157,40 +167,25 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
7001
7002 if (mmap_is_legacy()) {
7003 mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
7004+
7005+#ifdef CONFIG_PAX_RANDMMAP
7006+ if (mm->pax_flags & MF_PAX_RANDMMAP)
7007+ mm->mmap_base += mm->delta_mmap;
7008+#endif
7009+
7010 mm->get_unmapped_area = arch_get_unmapped_area;
7011 } else {
7012 mm->mmap_base = mmap_base(random_factor);
7013+
7014+#ifdef CONFIG_PAX_RANDMMAP
7015+ if (mm->pax_flags & MF_PAX_RANDMMAP)
7016+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
7017+#endif
7018+
7019 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
7020 }
7021 }
7022
7023-static inline unsigned long brk_rnd(void)
7024-{
7025- unsigned long rnd = get_random_int();
7026-
7027- rnd = rnd << PAGE_SHIFT;
7028- /* 8MB for 32bit, 256MB for 64bit */
7029- if (TASK_IS_32BIT_ADDR)
7030- rnd = rnd & 0x7ffffful;
7031- else
7032- rnd = rnd & 0xffffffful;
7033-
7034- return rnd;
7035-}
7036-
7037-unsigned long arch_randomize_brk(struct mm_struct *mm)
7038-{
7039- unsigned long base = mm->brk;
7040- unsigned long ret;
7041-
7042- ret = PAGE_ALIGN(base + brk_rnd());
7043-
7044- if (ret < mm->brk)
7045- return mm->brk;
7046-
7047- return ret;
7048-}
7049-
7050 int __virt_addr_valid(const volatile void *kaddr)
7051 {
7052 return pfn_valid(PFN_DOWN(virt_to_phys(kaddr)));
7053diff --git a/arch/mips/sgi-ip27/ip27-nmi.c b/arch/mips/sgi-ip27/ip27-nmi.c
7054index a2358b4..7cead4f 100644
7055--- a/arch/mips/sgi-ip27/ip27-nmi.c
7056+++ b/arch/mips/sgi-ip27/ip27-nmi.c
7057@@ -187,9 +187,9 @@ void
7058 cont_nmi_dump(void)
7059 {
7060 #ifndef REAL_NMI_SIGNAL
7061- static atomic_t nmied_cpus = ATOMIC_INIT(0);
7062+ static atomic_unchecked_t nmied_cpus = ATOMIC_INIT(0);
7063
7064- atomic_inc(&nmied_cpus);
7065+ atomic_inc_unchecked(&nmied_cpus);
7066 #endif
7067 /*
7068 * Only allow 1 cpu to proceed
7069@@ -233,7 +233,7 @@ cont_nmi_dump(void)
7070 udelay(10000);
7071 }
7072 #else
7073- while (atomic_read(&nmied_cpus) != num_online_cpus());
7074+ while (atomic_read_unchecked(&nmied_cpus) != num_online_cpus());
7075 #endif
7076
7077 /*
7078diff --git a/arch/mips/sni/rm200.c b/arch/mips/sni/rm200.c
7079index a046b30..6799527 100644
7080--- a/arch/mips/sni/rm200.c
7081+++ b/arch/mips/sni/rm200.c
7082@@ -270,7 +270,7 @@ spurious_8259A_irq:
7083 "spurious RM200 8259A interrupt: IRQ%d.\n", irq);
7084 spurious_irq_mask |= irqmask;
7085 }
7086- atomic_inc(&irq_err_count);
7087+ atomic_inc_unchecked(&irq_err_count);
7088 /*
7089 * Theoretically we do not have to handle this IRQ,
7090 * but in Linux this does not cause problems and is
7091diff --git a/arch/mips/vr41xx/common/icu.c b/arch/mips/vr41xx/common/icu.c
7092index 41e873b..34d33a7 100644
7093--- a/arch/mips/vr41xx/common/icu.c
7094+++ b/arch/mips/vr41xx/common/icu.c
7095@@ -653,7 +653,7 @@ static int icu_get_irq(unsigned int irq)
7096
7097 printk(KERN_ERR "spurious ICU interrupt: %04x,%04x\n", pend1, pend2);
7098
7099- atomic_inc(&irq_err_count);
7100+ atomic_inc_unchecked(&irq_err_count);
7101
7102 return -1;
7103 }
7104diff --git a/arch/mips/vr41xx/common/irq.c b/arch/mips/vr41xx/common/irq.c
7105index ae0e4ee..e8f0692 100644
7106--- a/arch/mips/vr41xx/common/irq.c
7107+++ b/arch/mips/vr41xx/common/irq.c
7108@@ -64,7 +64,7 @@ static void irq_dispatch(unsigned int irq)
7109 irq_cascade_t *cascade;
7110
7111 if (irq >= NR_IRQS) {
7112- atomic_inc(&irq_err_count);
7113+ atomic_inc_unchecked(&irq_err_count);
7114 return;
7115 }
7116
7117@@ -84,7 +84,7 @@ static void irq_dispatch(unsigned int irq)
7118 ret = cascade->get_irq(irq);
7119 irq = ret;
7120 if (ret < 0)
7121- atomic_inc(&irq_err_count);
7122+ atomic_inc_unchecked(&irq_err_count);
7123 else
7124 irq_dispatch(irq);
7125 if (!irqd_irq_disabled(idata) && chip->irq_unmask)
7126diff --git a/arch/mn10300/proc-mn103e010/include/proc/cache.h b/arch/mn10300/proc-mn103e010/include/proc/cache.h
7127index 967d144..db12197 100644
7128--- a/arch/mn10300/proc-mn103e010/include/proc/cache.h
7129+++ b/arch/mn10300/proc-mn103e010/include/proc/cache.h
7130@@ -11,12 +11,14 @@
7131 #ifndef _ASM_PROC_CACHE_H
7132 #define _ASM_PROC_CACHE_H
7133
7134+#include <linux/const.h>
7135+
7136 /* L1 cache */
7137
7138 #define L1_CACHE_NWAYS 4 /* number of ways in caches */
7139 #define L1_CACHE_NENTRIES 256 /* number of entries in each way */
7140-#define L1_CACHE_BYTES 16 /* bytes per entry */
7141 #define L1_CACHE_SHIFT 4 /* shift for bytes per entry */
7142+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT) /* bytes per entry */
7143 #define L1_CACHE_WAYDISP 0x1000 /* displacement of one way from the next */
7144
7145 #define L1_CACHE_TAG_VALID 0x00000001 /* cache tag valid bit */
7146diff --git a/arch/mn10300/proc-mn2ws0050/include/proc/cache.h b/arch/mn10300/proc-mn2ws0050/include/proc/cache.h
7147index bcb5df2..84fabd2 100644
7148--- a/arch/mn10300/proc-mn2ws0050/include/proc/cache.h
7149+++ b/arch/mn10300/proc-mn2ws0050/include/proc/cache.h
7150@@ -16,13 +16,15 @@
7151 #ifndef _ASM_PROC_CACHE_H
7152 #define _ASM_PROC_CACHE_H
7153
7154+#include <linux/const.h>
7155+
7156 /*
7157 * L1 cache
7158 */
7159 #define L1_CACHE_NWAYS 4 /* number of ways in caches */
7160 #define L1_CACHE_NENTRIES 128 /* number of entries in each way */
7161-#define L1_CACHE_BYTES 32 /* bytes per entry */
7162 #define L1_CACHE_SHIFT 5 /* shift for bytes per entry */
7163+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT) /* bytes per entry */
7164 #define L1_CACHE_WAYDISP 0x1000 /* distance from one way to the next */
7165
7166 #define L1_CACHE_TAG_VALID 0x00000001 /* cache tag valid bit */
7167diff --git a/arch/openrisc/include/asm/cache.h b/arch/openrisc/include/asm/cache.h
7168index 4ce7a01..449202a 100644
7169--- a/arch/openrisc/include/asm/cache.h
7170+++ b/arch/openrisc/include/asm/cache.h
7171@@ -19,11 +19,13 @@
7172 #ifndef __ASM_OPENRISC_CACHE_H
7173 #define __ASM_OPENRISC_CACHE_H
7174
7175+#include <linux/const.h>
7176+
7177 /* FIXME: How can we replace these with values from the CPU...
7178 * they shouldn't be hard-coded!
7179 */
7180
7181-#define L1_CACHE_BYTES 16
7182 #define L1_CACHE_SHIFT 4
7183+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
7184
7185 #endif /* __ASM_OPENRISC_CACHE_H */
7186diff --git a/arch/parisc/include/asm/atomic.h b/arch/parisc/include/asm/atomic.h
7187index 226f8ca..9d9b87d 100644
7188--- a/arch/parisc/include/asm/atomic.h
7189+++ b/arch/parisc/include/asm/atomic.h
7190@@ -273,6 +273,16 @@ static inline long atomic64_dec_if_positive(atomic64_t *v)
7191 return dec;
7192 }
7193
7194+#define atomic64_read_unchecked(v) atomic64_read(v)
7195+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
7196+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
7197+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
7198+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
7199+#define atomic64_inc_unchecked(v) atomic64_inc(v)
7200+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
7201+#define atomic64_dec_unchecked(v) atomic64_dec(v)
7202+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
7203+
7204 #endif /* !CONFIG_64BIT */
7205
7206
7207diff --git a/arch/parisc/include/asm/cache.h b/arch/parisc/include/asm/cache.h
7208index 47f11c7..3420df2 100644
7209--- a/arch/parisc/include/asm/cache.h
7210+++ b/arch/parisc/include/asm/cache.h
7211@@ -5,6 +5,7 @@
7212 #ifndef __ARCH_PARISC_CACHE_H
7213 #define __ARCH_PARISC_CACHE_H
7214
7215+#include <linux/const.h>
7216
7217 /*
7218 * PA 2.0 processors have 64-byte cachelines; PA 1.1 processors have
7219@@ -15,13 +16,13 @@
7220 * just ruin performance.
7221 */
7222 #ifdef CONFIG_PA20
7223-#define L1_CACHE_BYTES 64
7224 #define L1_CACHE_SHIFT 6
7225 #else
7226-#define L1_CACHE_BYTES 32
7227 #define L1_CACHE_SHIFT 5
7228 #endif
7229
7230+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
7231+
7232 #ifndef __ASSEMBLY__
7233
7234 #define SMP_CACHE_BYTES L1_CACHE_BYTES
7235diff --git a/arch/parisc/include/asm/elf.h b/arch/parisc/include/asm/elf.h
7236index 3391d06..c23a2cc 100644
7237--- a/arch/parisc/include/asm/elf.h
7238+++ b/arch/parisc/include/asm/elf.h
7239@@ -342,6 +342,13 @@ struct pt_regs; /* forward declaration... */
7240
7241 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x01000000)
7242
7243+#ifdef CONFIG_PAX_ASLR
7244+#define PAX_ELF_ET_DYN_BASE 0x10000UL
7245+
7246+#define PAX_DELTA_MMAP_LEN 16
7247+#define PAX_DELTA_STACK_LEN 16
7248+#endif
7249+
7250 /* This yields a mask that user programs can use to figure out what
7251 instruction set this CPU supports. This could be done in user space,
7252 but it's not easy, and we've already done it here. */
7253diff --git a/arch/parisc/include/asm/pgalloc.h b/arch/parisc/include/asm/pgalloc.h
7254index d174372..f27fe5c 100644
7255--- a/arch/parisc/include/asm/pgalloc.h
7256+++ b/arch/parisc/include/asm/pgalloc.h
7257@@ -61,6 +61,11 @@ static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
7258 (__u32)(__pa((unsigned long)pmd) >> PxD_VALUE_SHIFT));
7259 }
7260
7261+static inline void pgd_populate_kernel(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
7262+{
7263+ pgd_populate(mm, pgd, pmd);
7264+}
7265+
7266 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
7267 {
7268 pmd_t *pmd = (pmd_t *)__get_free_pages(GFP_KERNEL|__GFP_REPEAT,
7269@@ -96,6 +101,7 @@ static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
7270 #define pmd_alloc_one(mm, addr) ({ BUG(); ((pmd_t *)2); })
7271 #define pmd_free(mm, x) do { } while (0)
7272 #define pgd_populate(mm, pmd, pte) BUG()
7273+#define pgd_populate_kernel(mm, pmd, pte) BUG()
7274
7275 #endif
7276
7277diff --git a/arch/parisc/include/asm/pgtable.h b/arch/parisc/include/asm/pgtable.h
7278index 15207b9..3209e65 100644
7279--- a/arch/parisc/include/asm/pgtable.h
7280+++ b/arch/parisc/include/asm/pgtable.h
7281@@ -215,6 +215,17 @@ extern void purge_tlb_entries(struct mm_struct *, unsigned long);
7282 #define PAGE_EXECREAD __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_EXEC |_PAGE_ACCESSED)
7283 #define PAGE_COPY PAGE_EXECREAD
7284 #define PAGE_RWX __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_EXEC |_PAGE_ACCESSED)
7285+
7286+#ifdef CONFIG_PAX_PAGEEXEC
7287+# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_ACCESSED)
7288+# define PAGE_COPY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
7289+# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
7290+#else
7291+# define PAGE_SHARED_NOEXEC PAGE_SHARED
7292+# define PAGE_COPY_NOEXEC PAGE_COPY
7293+# define PAGE_READONLY_NOEXEC PAGE_READONLY
7294+#endif
7295+
7296 #define PAGE_KERNEL __pgprot(_PAGE_KERNEL)
7297 #define PAGE_KERNEL_EXEC __pgprot(_PAGE_KERNEL_EXEC)
7298 #define PAGE_KERNEL_RWX __pgprot(_PAGE_KERNEL_RWX)
7299diff --git a/arch/parisc/include/asm/uaccess.h b/arch/parisc/include/asm/uaccess.h
7300index 0abdd4c..1af92f0 100644
7301--- a/arch/parisc/include/asm/uaccess.h
7302+++ b/arch/parisc/include/asm/uaccess.h
7303@@ -243,10 +243,10 @@ static inline unsigned long __must_check copy_from_user(void *to,
7304 const void __user *from,
7305 unsigned long n)
7306 {
7307- int sz = __compiletime_object_size(to);
7308+ size_t sz = __compiletime_object_size(to);
7309 int ret = -EFAULT;
7310
7311- if (likely(sz == -1 || !__builtin_constant_p(n) || sz >= n))
7312+ if (likely(sz == (size_t)-1 || !__builtin_constant_p(n) || sz >= n))
7313 ret = __copy_from_user(to, from, n);
7314 else
7315 copy_from_user_overflow();
7316diff --git a/arch/parisc/kernel/module.c b/arch/parisc/kernel/module.c
7317index 3c63a82..b1d6ee9 100644
7318--- a/arch/parisc/kernel/module.c
7319+++ b/arch/parisc/kernel/module.c
7320@@ -98,16 +98,38 @@
7321
7322 /* three functions to determine where in the module core
7323 * or init pieces the location is */
7324+static inline int in_init_rx(struct module *me, void *loc)
7325+{
7326+ return (loc >= me->module_init_rx &&
7327+ loc < (me->module_init_rx + me->init_size_rx));
7328+}
7329+
7330+static inline int in_init_rw(struct module *me, void *loc)
7331+{
7332+ return (loc >= me->module_init_rw &&
7333+ loc < (me->module_init_rw + me->init_size_rw));
7334+}
7335+
7336 static inline int in_init(struct module *me, void *loc)
7337 {
7338- return (loc >= me->module_init &&
7339- loc <= (me->module_init + me->init_size));
7340+ return in_init_rx(me, loc) || in_init_rw(me, loc);
7341+}
7342+
7343+static inline int in_core_rx(struct module *me, void *loc)
7344+{
7345+ return (loc >= me->module_core_rx &&
7346+ loc < (me->module_core_rx + me->core_size_rx));
7347+}
7348+
7349+static inline int in_core_rw(struct module *me, void *loc)
7350+{
7351+ return (loc >= me->module_core_rw &&
7352+ loc < (me->module_core_rw + me->core_size_rw));
7353 }
7354
7355 static inline int in_core(struct module *me, void *loc)
7356 {
7357- return (loc >= me->module_core &&
7358- loc <= (me->module_core + me->core_size));
7359+ return in_core_rx(me, loc) || in_core_rw(me, loc);
7360 }
7361
7362 static inline int in_local(struct module *me, void *loc)
7363@@ -367,13 +389,13 @@ int module_frob_arch_sections(CONST Elf_Ehdr *hdr,
7364 }
7365
7366 /* align things a bit */
7367- me->core_size = ALIGN(me->core_size, 16);
7368- me->arch.got_offset = me->core_size;
7369- me->core_size += gots * sizeof(struct got_entry);
7370+ me->core_size_rw = ALIGN(me->core_size_rw, 16);
7371+ me->arch.got_offset = me->core_size_rw;
7372+ me->core_size_rw += gots * sizeof(struct got_entry);
7373
7374- me->core_size = ALIGN(me->core_size, 16);
7375- me->arch.fdesc_offset = me->core_size;
7376- me->core_size += fdescs * sizeof(Elf_Fdesc);
7377+ me->core_size_rw = ALIGN(me->core_size_rw, 16);
7378+ me->arch.fdesc_offset = me->core_size_rw;
7379+ me->core_size_rw += fdescs * sizeof(Elf_Fdesc);
7380
7381 me->arch.got_max = gots;
7382 me->arch.fdesc_max = fdescs;
7383@@ -391,7 +413,7 @@ static Elf64_Word get_got(struct module *me, unsigned long value, long addend)
7384
7385 BUG_ON(value == 0);
7386
7387- got = me->module_core + me->arch.got_offset;
7388+ got = me->module_core_rw + me->arch.got_offset;
7389 for (i = 0; got[i].addr; i++)
7390 if (got[i].addr == value)
7391 goto out;
7392@@ -409,7 +431,7 @@ static Elf64_Word get_got(struct module *me, unsigned long value, long addend)
7393 #ifdef CONFIG_64BIT
7394 static Elf_Addr get_fdesc(struct module *me, unsigned long value)
7395 {
7396- Elf_Fdesc *fdesc = me->module_core + me->arch.fdesc_offset;
7397+ Elf_Fdesc *fdesc = me->module_core_rw + me->arch.fdesc_offset;
7398
7399 if (!value) {
7400 printk(KERN_ERR "%s: zero OPD requested!\n", me->name);
7401@@ -427,7 +449,7 @@ static Elf_Addr get_fdesc(struct module *me, unsigned long value)
7402
7403 /* Create new one */
7404 fdesc->addr = value;
7405- fdesc->gp = (Elf_Addr)me->module_core + me->arch.got_offset;
7406+ fdesc->gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
7407 return (Elf_Addr)fdesc;
7408 }
7409 #endif /* CONFIG_64BIT */
7410@@ -839,7 +861,7 @@ register_unwind_table(struct module *me,
7411
7412 table = (unsigned char *)sechdrs[me->arch.unwind_section].sh_addr;
7413 end = table + sechdrs[me->arch.unwind_section].sh_size;
7414- gp = (Elf_Addr)me->module_core + me->arch.got_offset;
7415+ gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
7416
7417 DEBUGP("register_unwind_table(), sect = %d at 0x%p - 0x%p (gp=0x%lx)\n",
7418 me->arch.unwind_section, table, end, gp);
7419diff --git a/arch/parisc/kernel/sys_parisc.c b/arch/parisc/kernel/sys_parisc.c
7420index e1ffea2..46ed66e 100644
7421--- a/arch/parisc/kernel/sys_parisc.c
7422+++ b/arch/parisc/kernel/sys_parisc.c
7423@@ -89,6 +89,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
7424 unsigned long task_size = TASK_SIZE;
7425 int do_color_align, last_mmap;
7426 struct vm_unmapped_area_info info;
7427+ unsigned long offset = gr_rand_threadstack_offset(current->mm, filp, flags);
7428
7429 if (len > task_size)
7430 return -ENOMEM;
7431@@ -106,6 +107,10 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
7432 goto found_addr;
7433 }
7434
7435+#ifdef CONFIG_PAX_RANDMMAP
7436+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
7437+#endif
7438+
7439 if (addr) {
7440 if (do_color_align && last_mmap)
7441 addr = COLOR_ALIGN(addr, last_mmap, pgoff);
7442@@ -124,6 +129,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
7443 info.high_limit = mmap_upper_limit();
7444 info.align_mask = last_mmap ? (PAGE_MASK & (SHM_COLOUR - 1)) : 0;
7445 info.align_offset = shared_align_offset(last_mmap, pgoff);
7446+ info.threadstack_offset = offset;
7447 addr = vm_unmapped_area(&info);
7448
7449 found_addr:
7450@@ -143,6 +149,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
7451 unsigned long addr = addr0;
7452 int do_color_align, last_mmap;
7453 struct vm_unmapped_area_info info;
7454+ unsigned long offset = gr_rand_threadstack_offset(current->mm, filp, flags);
7455
7456 #ifdef CONFIG_64BIT
7457 /* This should only ever run for 32-bit processes. */
7458@@ -167,6 +174,10 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
7459 }
7460
7461 /* requesting a specific address */
7462+#ifdef CONFIG_PAX_RANDMMAP
7463+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
7464+#endif
7465+
7466 if (addr) {
7467 if (do_color_align && last_mmap)
7468 addr = COLOR_ALIGN(addr, last_mmap, pgoff);
7469@@ -184,6 +195,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
7470 info.high_limit = mm->mmap_base;
7471 info.align_mask = last_mmap ? (PAGE_MASK & (SHM_COLOUR - 1)) : 0;
7472 info.align_offset = shared_align_offset(last_mmap, pgoff);
7473+ info.threadstack_offset = offset;
7474 addr = vm_unmapped_area(&info);
7475 if (!(addr & ~PAGE_MASK))
7476 goto found_addr;
7477@@ -249,6 +261,13 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
7478 mm->mmap_legacy_base = mmap_legacy_base();
7479 mm->mmap_base = mmap_upper_limit();
7480
7481+#ifdef CONFIG_PAX_RANDMMAP
7482+ if (mm->pax_flags & MF_PAX_RANDMMAP) {
7483+ mm->mmap_legacy_base += mm->delta_mmap;
7484+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
7485+ }
7486+#endif
7487+
7488 if (mmap_is_legacy()) {
7489 mm->mmap_base = mm->mmap_legacy_base;
7490 mm->get_unmapped_area = arch_get_unmapped_area;
7491diff --git a/arch/parisc/kernel/traps.c b/arch/parisc/kernel/traps.c
7492index 47ee620..1107387 100644
7493--- a/arch/parisc/kernel/traps.c
7494+++ b/arch/parisc/kernel/traps.c
7495@@ -726,9 +726,7 @@ void notrace handle_interruption(int code, struct pt_regs *regs)
7496
7497 down_read(&current->mm->mmap_sem);
7498 vma = find_vma(current->mm,regs->iaoq[0]);
7499- if (vma && (regs->iaoq[0] >= vma->vm_start)
7500- && (vma->vm_flags & VM_EXEC)) {
7501-
7502+ if (vma && (regs->iaoq[0] >= vma->vm_start)) {
7503 fault_address = regs->iaoq[0];
7504 fault_space = regs->iasq[0];
7505
7506diff --git a/arch/parisc/mm/fault.c b/arch/parisc/mm/fault.c
7507index e5120e6..8ddb5cc 100644
7508--- a/arch/parisc/mm/fault.c
7509+++ b/arch/parisc/mm/fault.c
7510@@ -15,6 +15,7 @@
7511 #include <linux/sched.h>
7512 #include <linux/interrupt.h>
7513 #include <linux/module.h>
7514+#include <linux/unistd.h>
7515
7516 #include <asm/uaccess.h>
7517 #include <asm/traps.h>
7518@@ -50,7 +51,7 @@ int show_unhandled_signals = 1;
7519 static unsigned long
7520 parisc_acctyp(unsigned long code, unsigned int inst)
7521 {
7522- if (code == 6 || code == 16)
7523+ if (code == 6 || code == 7 || code == 16)
7524 return VM_EXEC;
7525
7526 switch (inst & 0xf0000000) {
7527@@ -136,6 +137,116 @@ parisc_acctyp(unsigned long code, unsigned int inst)
7528 }
7529 #endif
7530
7531+#ifdef CONFIG_PAX_PAGEEXEC
7532+/*
7533+ * PaX: decide what to do with offenders (instruction_pointer(regs) = fault address)
7534+ *
7535+ * returns 1 when task should be killed
7536+ * 2 when rt_sigreturn trampoline was detected
7537+ * 3 when unpatched PLT trampoline was detected
7538+ */
7539+static int pax_handle_fetch_fault(struct pt_regs *regs)
7540+{
7541+
7542+#ifdef CONFIG_PAX_EMUPLT
7543+ int err;
7544+
7545+ do { /* PaX: unpatched PLT emulation */
7546+ unsigned int bl, depwi;
7547+
7548+ err = get_user(bl, (unsigned int *)instruction_pointer(regs));
7549+ err |= get_user(depwi, (unsigned int *)(instruction_pointer(regs)+4));
7550+
7551+ if (err)
7552+ break;
7553+
7554+ if (bl == 0xEA9F1FDDU && depwi == 0xD6801C1EU) {
7555+ unsigned int ldw, bv, ldw2, addr = instruction_pointer(regs)-12;
7556+
7557+ err = get_user(ldw, (unsigned int *)addr);
7558+ err |= get_user(bv, (unsigned int *)(addr+4));
7559+ err |= get_user(ldw2, (unsigned int *)(addr+8));
7560+
7561+ if (err)
7562+ break;
7563+
7564+ if (ldw == 0x0E801096U &&
7565+ bv == 0xEAC0C000U &&
7566+ ldw2 == 0x0E881095U)
7567+ {
7568+ unsigned int resolver, map;
7569+
7570+ err = get_user(resolver, (unsigned int *)(instruction_pointer(regs)+8));
7571+ err |= get_user(map, (unsigned int *)(instruction_pointer(regs)+12));
7572+ if (err)
7573+ break;
7574+
7575+ regs->gr[20] = instruction_pointer(regs)+8;
7576+ regs->gr[21] = map;
7577+ regs->gr[22] = resolver;
7578+ regs->iaoq[0] = resolver | 3UL;
7579+ regs->iaoq[1] = regs->iaoq[0] + 4;
7580+ return 3;
7581+ }
7582+ }
7583+ } while (0);
7584+#endif
7585+
7586+#ifdef CONFIG_PAX_EMUTRAMP
7587+
7588+#ifndef CONFIG_PAX_EMUSIGRT
7589+ if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
7590+ return 1;
7591+#endif
7592+
7593+ do { /* PaX: rt_sigreturn emulation */
7594+ unsigned int ldi1, ldi2, bel, nop;
7595+
7596+ err = get_user(ldi1, (unsigned int *)instruction_pointer(regs));
7597+ err |= get_user(ldi2, (unsigned int *)(instruction_pointer(regs)+4));
7598+ err |= get_user(bel, (unsigned int *)(instruction_pointer(regs)+8));
7599+ err |= get_user(nop, (unsigned int *)(instruction_pointer(regs)+12));
7600+
7601+ if (err)
7602+ break;
7603+
7604+ if ((ldi1 == 0x34190000U || ldi1 == 0x34190002U) &&
7605+ ldi2 == 0x3414015AU &&
7606+ bel == 0xE4008200U &&
7607+ nop == 0x08000240U)
7608+ {
7609+ regs->gr[25] = (ldi1 & 2) >> 1;
7610+ regs->gr[20] = __NR_rt_sigreturn;
7611+ regs->gr[31] = regs->iaoq[1] + 16;
7612+ regs->sr[0] = regs->iasq[1];
7613+ regs->iaoq[0] = 0x100UL;
7614+ regs->iaoq[1] = regs->iaoq[0] + 4;
7615+ regs->iasq[0] = regs->sr[2];
7616+ regs->iasq[1] = regs->sr[2];
7617+ return 2;
7618+ }
7619+ } while (0);
7620+#endif
7621+
7622+ return 1;
7623+}
7624+
7625+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
7626+{
7627+ unsigned long i;
7628+
7629+ printk(KERN_ERR "PAX: bytes at PC: ");
7630+ for (i = 0; i < 5; i++) {
7631+ unsigned int c;
7632+ if (get_user(c, (unsigned int *)pc+i))
7633+ printk(KERN_CONT "???????? ");
7634+ else
7635+ printk(KERN_CONT "%08x ", c);
7636+ }
7637+ printk("\n");
7638+}
7639+#endif
7640+
7641 int fixup_exception(struct pt_regs *regs)
7642 {
7643 const struct exception_table_entry *fix;
7644@@ -234,8 +345,33 @@ retry:
7645
7646 good_area:
7647
7648- if ((vma->vm_flags & acc_type) != acc_type)
7649+ if ((vma->vm_flags & acc_type) != acc_type) {
7650+
7651+#ifdef CONFIG_PAX_PAGEEXEC
7652+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && (acc_type & VM_EXEC) &&
7653+ (address & ~3UL) == instruction_pointer(regs))
7654+ {
7655+ up_read(&mm->mmap_sem);
7656+ switch (pax_handle_fetch_fault(regs)) {
7657+
7658+#ifdef CONFIG_PAX_EMUPLT
7659+ case 3:
7660+ return;
7661+#endif
7662+
7663+#ifdef CONFIG_PAX_EMUTRAMP
7664+ case 2:
7665+ return;
7666+#endif
7667+
7668+ }
7669+ pax_report_fault(regs, (void *)instruction_pointer(regs), (void *)regs->gr[30]);
7670+ do_group_exit(SIGKILL);
7671+ }
7672+#endif
7673+
7674 goto bad_area;
7675+ }
7676
7677 /*
7678 * If for any reason at all we couldn't handle the fault, make
7679diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
7680index 22b0940..309f790 100644
7681--- a/arch/powerpc/Kconfig
7682+++ b/arch/powerpc/Kconfig
7683@@ -409,6 +409,7 @@ config PPC64_SUPPORTS_MEMORY_FAILURE
7684 config KEXEC
7685 bool "kexec system call"
7686 depends on (PPC_BOOK3S || FSL_BOOKE || (44x && !SMP))
7687+ depends on !GRKERNSEC_KMEM
7688 help
7689 kexec is a system call that implements the ability to shutdown your
7690 current kernel, and to start another kernel. It is like a reboot
7691diff --git a/arch/powerpc/include/asm/atomic.h b/arch/powerpc/include/asm/atomic.h
7692index 512d278..d31fadd 100644
7693--- a/arch/powerpc/include/asm/atomic.h
7694+++ b/arch/powerpc/include/asm/atomic.h
7695@@ -12,6 +12,11 @@
7696
7697 #define ATOMIC_INIT(i) { (i) }
7698
7699+#define _ASM_EXTABLE(from, to) \
7700+" .section __ex_table,\"a\"\n" \
7701+ PPC_LONG" " #from ", " #to"\n" \
7702+" .previous\n"
7703+
7704 static __inline__ int atomic_read(const atomic_t *v)
7705 {
7706 int t;
7707@@ -21,39 +26,80 @@ static __inline__ int atomic_read(const atomic_t *v)
7708 return t;
7709 }
7710
7711+static __inline__ int atomic_read_unchecked(const atomic_unchecked_t *v)
7712+{
7713+ int t;
7714+
7715+ __asm__ __volatile__("lwz%U1%X1 %0,%1" : "=r"(t) : "m"(v->counter));
7716+
7717+ return t;
7718+}
7719+
7720 static __inline__ void atomic_set(atomic_t *v, int i)
7721 {
7722 __asm__ __volatile__("stw%U0%X0 %1,%0" : "=m"(v->counter) : "r"(i));
7723 }
7724
7725-#define ATOMIC_OP(op, asm_op) \
7726-static __inline__ void atomic_##op(int a, atomic_t *v) \
7727+static __inline__ void atomic_set_unchecked(atomic_unchecked_t *v, int i)
7728+{
7729+ __asm__ __volatile__("stw%U0%X0 %1,%0" : "=m"(v->counter) : "r"(i));
7730+}
7731+
7732+#ifdef CONFIG_PAX_REFCOUNT
7733+#define __REFCOUNT_OP(op) op##o.
7734+#define __OVERFLOW_PRE \
7735+ " mcrxr cr0\n"
7736+#define __OVERFLOW_POST \
7737+ " bf 4*cr0+so, 3f\n" \
7738+ "2: .long 0x00c00b00\n" \
7739+ "3:\n"
7740+#define __OVERFLOW_EXTABLE \
7741+ "\n4:\n"
7742+ _ASM_EXTABLE(2b, 4b)
7743+#else
7744+#define __REFCOUNT_OP(op) op
7745+#define __OVERFLOW_PRE
7746+#define __OVERFLOW_POST
7747+#define __OVERFLOW_EXTABLE
7748+#endif
7749+
7750+#define __ATOMIC_OP(op, suffix, pre_op, asm_op, post_op, extable) \
7751+static inline void atomic_##op##suffix(int a, atomic##suffix##_t *v) \
7752 { \
7753 int t; \
7754 \
7755 __asm__ __volatile__( \
7756-"1: lwarx %0,0,%3 # atomic_" #op "\n" \
7757+"1: lwarx %0,0,%3 # atomic_" #op #suffix "\n" \
7758+ pre_op \
7759 #asm_op " %0,%2,%0\n" \
7760+ post_op \
7761 PPC405_ERR77(0,%3) \
7762 " stwcx. %0,0,%3 \n" \
7763 " bne- 1b\n" \
7764+ extable \
7765 : "=&r" (t), "+m" (v->counter) \
7766 : "r" (a), "r" (&v->counter) \
7767 : "cc"); \
7768 } \
7769
7770-#define ATOMIC_OP_RETURN(op, asm_op) \
7771-static __inline__ int atomic_##op##_return(int a, atomic_t *v) \
7772+#define ATOMIC_OP(op, asm_op) __ATOMIC_OP(op, , , asm_op, , ) \
7773+ __ATOMIC_OP(op, _unchecked, __OVERFLOW_PRE, __REFCOUNT_OP(asm_op), __OVERFLOW_POST, __OVERFLOW_EXTABLE)
7774+
7775+#define __ATOMIC_OP_RETURN(op, suffix, pre_op, asm_op, post_op, extable)\
7776+static inline int atomic_##op##_return##suffix(int a, atomic##suffix##_t *v)\
7777 { \
7778 int t; \
7779 \
7780 __asm__ __volatile__( \
7781 PPC_ATOMIC_ENTRY_BARRIER \
7782-"1: lwarx %0,0,%2 # atomic_" #op "_return\n" \
7783+"1: lwarx %0,0,%2 # atomic_" #op "_return" #suffix "\n" \
7784+ pre_op \
7785 #asm_op " %0,%1,%0\n" \
7786+ post_op \
7787 PPC405_ERR77(0,%2) \
7788 " stwcx. %0,0,%2 \n" \
7789 " bne- 1b\n" \
7790+ extable \
7791 PPC_ATOMIC_EXIT_BARRIER \
7792 : "=&r" (t) \
7793 : "r" (a), "r" (&v->counter) \
7794@@ -62,6 +108,9 @@ static __inline__ int atomic_##op##_return(int a, atomic_t *v) \
7795 return t; \
7796 }
7797
7798+#define ATOMIC_OP_RETURN(op, asm_op) __ATOMIC_OP_RETURN(op, , , asm_op, , )\
7799+ __ATOMIC_OP_RETURN(op, _unchecked, __OVERFLOW_PRE, __REFCOUNT_OP(asm_op), __OVERFLOW_POST, __OVERFLOW_EXTABLE)
7800+
7801 #define ATOMIC_OPS(op, asm_op) ATOMIC_OP(op, asm_op) ATOMIC_OP_RETURN(op, asm_op)
7802
7803 ATOMIC_OPS(add, add)
7804@@ -69,42 +118,29 @@ ATOMIC_OPS(sub, subf)
7805
7806 #undef ATOMIC_OPS
7807 #undef ATOMIC_OP_RETURN
7808+#undef __ATOMIC_OP_RETURN
7809 #undef ATOMIC_OP
7810+#undef __ATOMIC_OP
7811
7812 #define atomic_add_negative(a, v) (atomic_add_return((a), (v)) < 0)
7813
7814-static __inline__ void atomic_inc(atomic_t *v)
7815-{
7816- int t;
7817+/*
7818+ * atomic_inc - increment atomic variable
7819+ * @v: pointer of type atomic_t
7820+ *
7821+ * Automatically increments @v by 1
7822+ */
7823+#define atomic_inc(v) atomic_add(1, (v))
7824+#define atomic_inc_return(v) atomic_add_return(1, (v))
7825
7826- __asm__ __volatile__(
7827-"1: lwarx %0,0,%2 # atomic_inc\n\
7828- addic %0,%0,1\n"
7829- PPC405_ERR77(0,%2)
7830-" stwcx. %0,0,%2 \n\
7831- bne- 1b"
7832- : "=&r" (t), "+m" (v->counter)
7833- : "r" (&v->counter)
7834- : "cc", "xer");
7835+static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
7836+{
7837+ atomic_add_unchecked(1, v);
7838 }
7839
7840-static __inline__ int atomic_inc_return(atomic_t *v)
7841+static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
7842 {
7843- int t;
7844-
7845- __asm__ __volatile__(
7846- PPC_ATOMIC_ENTRY_BARRIER
7847-"1: lwarx %0,0,%1 # atomic_inc_return\n\
7848- addic %0,%0,1\n"
7849- PPC405_ERR77(0,%1)
7850-" stwcx. %0,0,%1 \n\
7851- bne- 1b"
7852- PPC_ATOMIC_EXIT_BARRIER
7853- : "=&r" (t)
7854- : "r" (&v->counter)
7855- : "cc", "xer", "memory");
7856-
7857- return t;
7858+ return atomic_add_return_unchecked(1, v);
7859 }
7860
7861 /*
7862@@ -117,43 +153,38 @@ static __inline__ int atomic_inc_return(atomic_t *v)
7863 */
7864 #define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
7865
7866-static __inline__ void atomic_dec(atomic_t *v)
7867+static __inline__ int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
7868 {
7869- int t;
7870-
7871- __asm__ __volatile__(
7872-"1: lwarx %0,0,%2 # atomic_dec\n\
7873- addic %0,%0,-1\n"
7874- PPC405_ERR77(0,%2)\
7875-" stwcx. %0,0,%2\n\
7876- bne- 1b"
7877- : "=&r" (t), "+m" (v->counter)
7878- : "r" (&v->counter)
7879- : "cc", "xer");
7880+ return atomic_add_return_unchecked(1, v) == 0;
7881 }
7882
7883-static __inline__ int atomic_dec_return(atomic_t *v)
7884+/*
7885+ * atomic_dec - decrement atomic variable
7886+ * @v: pointer of type atomic_t
7887+ *
7888+ * Atomically decrements @v by 1
7889+ */
7890+#define atomic_dec(v) atomic_sub(1, (v))
7891+#define atomic_dec_return(v) atomic_sub_return(1, (v))
7892+
7893+static __inline__ void atomic_dec_unchecked(atomic_unchecked_t *v)
7894 {
7895- int t;
7896-
7897- __asm__ __volatile__(
7898- PPC_ATOMIC_ENTRY_BARRIER
7899-"1: lwarx %0,0,%1 # atomic_dec_return\n\
7900- addic %0,%0,-1\n"
7901- PPC405_ERR77(0,%1)
7902-" stwcx. %0,0,%1\n\
7903- bne- 1b"
7904- PPC_ATOMIC_EXIT_BARRIER
7905- : "=&r" (t)
7906- : "r" (&v->counter)
7907- : "cc", "xer", "memory");
7908-
7909- return t;
7910+ atomic_sub_unchecked(1, v);
7911 }
7912
7913 #define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
7914 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
7915
7916+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
7917+{
7918+ return cmpxchg(&(v->counter), old, new);
7919+}
7920+
7921+static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
7922+{
7923+ return xchg(&(v->counter), new);
7924+}
7925+
7926 /**
7927 * __atomic_add_unless - add unless the number is a given value
7928 * @v: pointer of type atomic_t
7929@@ -171,11 +202,27 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
7930 PPC_ATOMIC_ENTRY_BARRIER
7931 "1: lwarx %0,0,%1 # __atomic_add_unless\n\
7932 cmpw 0,%0,%3 \n\
7933- beq- 2f \n\
7934- add %0,%2,%0 \n"
7935+ beq- 2f \n"
7936+
7937+#ifdef CONFIG_PAX_REFCOUNT
7938+" mcrxr cr0\n"
7939+" addo. %0,%2,%0\n"
7940+" bf 4*cr0+so, 4f\n"
7941+"3:.long " "0x00c00b00""\n"
7942+"4:\n"
7943+#else
7944+ "add %0,%2,%0 \n"
7945+#endif
7946+
7947 PPC405_ERR77(0,%2)
7948 " stwcx. %0,0,%1 \n\
7949 bne- 1b \n"
7950+"5:"
7951+
7952+#ifdef CONFIG_PAX_REFCOUNT
7953+ _ASM_EXTABLE(3b, 5b)
7954+#endif
7955+
7956 PPC_ATOMIC_EXIT_BARRIER
7957 " subf %0,%2,%0 \n\
7958 2:"
7959@@ -248,6 +295,11 @@ static __inline__ int atomic_dec_if_positive(atomic_t *v)
7960 }
7961 #define atomic_dec_if_positive atomic_dec_if_positive
7962
7963+#define smp_mb__before_atomic_dec() smp_mb()
7964+#define smp_mb__after_atomic_dec() smp_mb()
7965+#define smp_mb__before_atomic_inc() smp_mb()
7966+#define smp_mb__after_atomic_inc() smp_mb()
7967+
7968 #ifdef __powerpc64__
7969
7970 #define ATOMIC64_INIT(i) { (i) }
7971@@ -261,37 +313,60 @@ static __inline__ long atomic64_read(const atomic64_t *v)
7972 return t;
7973 }
7974
7975+static __inline__ long atomic64_read_unchecked(const atomic64_unchecked_t *v)
7976+{
7977+ long t;
7978+
7979+ __asm__ __volatile__("ld%U1%X1 %0,%1" : "=r"(t) : "m"(v->counter));
7980+
7981+ return t;
7982+}
7983+
7984 static __inline__ void atomic64_set(atomic64_t *v, long i)
7985 {
7986 __asm__ __volatile__("std%U0%X0 %1,%0" : "=m"(v->counter) : "r"(i));
7987 }
7988
7989-#define ATOMIC64_OP(op, asm_op) \
7990-static __inline__ void atomic64_##op(long a, atomic64_t *v) \
7991+static __inline__ void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
7992+{
7993+ __asm__ __volatile__("std%U0%X0 %1,%0" : "=m"(v->counter) : "r"(i));
7994+}
7995+
7996+#define __ATOMIC64_OP(op, suffix, pre_op, asm_op, post_op, extable) \
7997+static inline void atomic64_##op##suffix(long a, atomic64##suffix##_t *v)\
7998 { \
7999 long t; \
8000 \
8001 __asm__ __volatile__( \
8002 "1: ldarx %0,0,%3 # atomic64_" #op "\n" \
8003+ pre_op \
8004 #asm_op " %0,%2,%0\n" \
8005+ post_op \
8006 " stdcx. %0,0,%3 \n" \
8007 " bne- 1b\n" \
8008+ extable \
8009 : "=&r" (t), "+m" (v->counter) \
8010 : "r" (a), "r" (&v->counter) \
8011 : "cc"); \
8012 }
8013
8014-#define ATOMIC64_OP_RETURN(op, asm_op) \
8015-static __inline__ long atomic64_##op##_return(long a, atomic64_t *v) \
8016+#define ATOMIC64_OP(op, asm_op) __ATOMIC64_OP(op, , , asm_op, , ) \
8017+ __ATOMIC64_OP(op, _unchecked, __OVERFLOW_PRE, __REFCOUNT_OP(asm_op), __OVERFLOW_POST, __OVERFLOW_EXTABLE)
8018+
8019+#define __ATOMIC64_OP_RETURN(op, suffix, pre_op, asm_op, post_op, extable)\
8020+static inline long atomic64_##op##_return##suffix(long a, atomic64##suffix##_t *v)\
8021 { \
8022 long t; \
8023 \
8024 __asm__ __volatile__( \
8025 PPC_ATOMIC_ENTRY_BARRIER \
8026 "1: ldarx %0,0,%2 # atomic64_" #op "_return\n" \
8027+ pre_op \
8028 #asm_op " %0,%1,%0\n" \
8029+ post_op \
8030 " stdcx. %0,0,%2 \n" \
8031 " bne- 1b\n" \
8032+ extable \
8033 PPC_ATOMIC_EXIT_BARRIER \
8034 : "=&r" (t) \
8035 : "r" (a), "r" (&v->counter) \
8036@@ -300,6 +375,9 @@ static __inline__ long atomic64_##op##_return(long a, atomic64_t *v) \
8037 return t; \
8038 }
8039
8040+#define ATOMIC64_OP_RETURN(op, asm_op) __ATOMIC64_OP_RETURN(op, , , asm_op, , )\
8041+ __ATOMIC64_OP_RETURN(op, _unchecked, __OVERFLOW_PRE, __REFCOUNT_OP(asm_op), __OVERFLOW_POST, __OVERFLOW_EXTABLE)
8042+
8043 #define ATOMIC64_OPS(op, asm_op) ATOMIC64_OP(op, asm_op) ATOMIC64_OP_RETURN(op, asm_op)
8044
8045 ATOMIC64_OPS(add, add)
8046@@ -307,40 +385,33 @@ ATOMIC64_OPS(sub, subf)
8047
8048 #undef ATOMIC64_OPS
8049 #undef ATOMIC64_OP_RETURN
8050+#undef __ATOMIC64_OP_RETURN
8051 #undef ATOMIC64_OP
8052+#undef __ATOMIC64_OP
8053+#undef __OVERFLOW_EXTABLE
8054+#undef __OVERFLOW_POST
8055+#undef __OVERFLOW_PRE
8056+#undef __REFCOUNT_OP
8057
8058 #define atomic64_add_negative(a, v) (atomic64_add_return((a), (v)) < 0)
8059
8060-static __inline__ void atomic64_inc(atomic64_t *v)
8061-{
8062- long t;
8063+/*
8064+ * atomic64_inc - increment atomic variable
8065+ * @v: pointer of type atomic64_t
8066+ *
8067+ * Automatically increments @v by 1
8068+ */
8069+#define atomic64_inc(v) atomic64_add(1, (v))
8070+#define atomic64_inc_return(v) atomic64_add_return(1, (v))
8071
8072- __asm__ __volatile__(
8073-"1: ldarx %0,0,%2 # atomic64_inc\n\
8074- addic %0,%0,1\n\
8075- stdcx. %0,0,%2 \n\
8076- bne- 1b"
8077- : "=&r" (t), "+m" (v->counter)
8078- : "r" (&v->counter)
8079- : "cc", "xer");
8080+static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
8081+{
8082+ atomic64_add_unchecked(1, v);
8083 }
8084
8085-static __inline__ long atomic64_inc_return(atomic64_t *v)
8086+static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
8087 {
8088- long t;
8089-
8090- __asm__ __volatile__(
8091- PPC_ATOMIC_ENTRY_BARRIER
8092-"1: ldarx %0,0,%1 # atomic64_inc_return\n\
8093- addic %0,%0,1\n\
8094- stdcx. %0,0,%1 \n\
8095- bne- 1b"
8096- PPC_ATOMIC_EXIT_BARRIER
8097- : "=&r" (t)
8098- : "r" (&v->counter)
8099- : "cc", "xer", "memory");
8100-
8101- return t;
8102+ return atomic64_add_return_unchecked(1, v);
8103 }
8104
8105 /*
8106@@ -353,36 +424,18 @@ static __inline__ long atomic64_inc_return(atomic64_t *v)
8107 */
8108 #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
8109
8110-static __inline__ void atomic64_dec(atomic64_t *v)
8111+/*
8112+ * atomic64_dec - decrement atomic variable
8113+ * @v: pointer of type atomic64_t
8114+ *
8115+ * Atomically decrements @v by 1
8116+ */
8117+#define atomic64_dec(v) atomic64_sub(1, (v))
8118+#define atomic64_dec_return(v) atomic64_sub_return(1, (v))
8119+
8120+static __inline__ void atomic64_dec_unchecked(atomic64_unchecked_t *v)
8121 {
8122- long t;
8123-
8124- __asm__ __volatile__(
8125-"1: ldarx %0,0,%2 # atomic64_dec\n\
8126- addic %0,%0,-1\n\
8127- stdcx. %0,0,%2\n\
8128- bne- 1b"
8129- : "=&r" (t), "+m" (v->counter)
8130- : "r" (&v->counter)
8131- : "cc", "xer");
8132-}
8133-
8134-static __inline__ long atomic64_dec_return(atomic64_t *v)
8135-{
8136- long t;
8137-
8138- __asm__ __volatile__(
8139- PPC_ATOMIC_ENTRY_BARRIER
8140-"1: ldarx %0,0,%1 # atomic64_dec_return\n\
8141- addic %0,%0,-1\n\
8142- stdcx. %0,0,%1\n\
8143- bne- 1b"
8144- PPC_ATOMIC_EXIT_BARRIER
8145- : "=&r" (t)
8146- : "r" (&v->counter)
8147- : "cc", "xer", "memory");
8148-
8149- return t;
8150+ atomic64_sub_unchecked(1, v);
8151 }
8152
8153 #define atomic64_sub_and_test(a, v) (atomic64_sub_return((a), (v)) == 0)
8154@@ -415,6 +468,16 @@ static __inline__ long atomic64_dec_if_positive(atomic64_t *v)
8155 #define atomic64_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
8156 #define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
8157
8158+static inline long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long old, long new)
8159+{
8160+ return cmpxchg(&(v->counter), old, new);
8161+}
8162+
8163+static inline long atomic64_xchg_unchecked(atomic64_unchecked_t *v, long new)
8164+{
8165+ return xchg(&(v->counter), new);
8166+}
8167+
8168 /**
8169 * atomic64_add_unless - add unless the number is a given value
8170 * @v: pointer of type atomic64_t
8171@@ -430,13 +493,29 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
8172
8173 __asm__ __volatile__ (
8174 PPC_ATOMIC_ENTRY_BARRIER
8175-"1: ldarx %0,0,%1 # __atomic_add_unless\n\
8176+"1: ldarx %0,0,%1 # atomic64_add_unless\n\
8177 cmpd 0,%0,%3 \n\
8178- beq- 2f \n\
8179- add %0,%2,%0 \n"
8180+ beq- 2f \n"
8181+
8182+#ifdef CONFIG_PAX_REFCOUNT
8183+" mcrxr cr0\n"
8184+" addo. %0,%2,%0\n"
8185+" bf 4*cr0+so, 4f\n"
8186+"3:.long " "0x00c00b00""\n"
8187+"4:\n"
8188+#else
8189+ "add %0,%2,%0 \n"
8190+#endif
8191+
8192 " stdcx. %0,0,%1 \n\
8193 bne- 1b \n"
8194 PPC_ATOMIC_EXIT_BARRIER
8195+"5:"
8196+
8197+#ifdef CONFIG_PAX_REFCOUNT
8198+ _ASM_EXTABLE(3b, 5b)
8199+#endif
8200+
8201 " subf %0,%2,%0 \n\
8202 2:"
8203 : "=&r" (t)
8204diff --git a/arch/powerpc/include/asm/barrier.h b/arch/powerpc/include/asm/barrier.h
8205index a3bf5be..e03ba81 100644
8206--- a/arch/powerpc/include/asm/barrier.h
8207+++ b/arch/powerpc/include/asm/barrier.h
8208@@ -76,7 +76,7 @@
8209 do { \
8210 compiletime_assert_atomic_type(*p); \
8211 smp_lwsync(); \
8212- ACCESS_ONCE(*p) = (v); \
8213+ ACCESS_ONCE_RW(*p) = (v); \
8214 } while (0)
8215
8216 #define smp_load_acquire(p) \
8217diff --git a/arch/powerpc/include/asm/cache.h b/arch/powerpc/include/asm/cache.h
8218index 34a05a1..a1f2c67 100644
8219--- a/arch/powerpc/include/asm/cache.h
8220+++ b/arch/powerpc/include/asm/cache.h
8221@@ -4,6 +4,7 @@
8222 #ifdef __KERNEL__
8223
8224 #include <asm/reg.h>
8225+#include <linux/const.h>
8226
8227 /* bytes per L1 cache line */
8228 #if defined(CONFIG_8xx) || defined(CONFIG_403GCX)
8229@@ -23,7 +24,7 @@
8230 #define L1_CACHE_SHIFT 7
8231 #endif
8232
8233-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
8234+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
8235
8236 #define SMP_CACHE_BYTES L1_CACHE_BYTES
8237
8238diff --git a/arch/powerpc/include/asm/elf.h b/arch/powerpc/include/asm/elf.h
8239index 57d289a..b36c98c 100644
8240--- a/arch/powerpc/include/asm/elf.h
8241+++ b/arch/powerpc/include/asm/elf.h
8242@@ -30,6 +30,18 @@
8243
8244 #define ELF_ET_DYN_BASE 0x20000000
8245
8246+#ifdef CONFIG_PAX_ASLR
8247+#define PAX_ELF_ET_DYN_BASE (0x10000000UL)
8248+
8249+#ifdef __powerpc64__
8250+#define PAX_DELTA_MMAP_LEN (is_32bit_task() ? 16 : 28)
8251+#define PAX_DELTA_STACK_LEN (is_32bit_task() ? 16 : 28)
8252+#else
8253+#define PAX_DELTA_MMAP_LEN 15
8254+#define PAX_DELTA_STACK_LEN 15
8255+#endif
8256+#endif
8257+
8258 #define ELF_CORE_EFLAGS (is_elf2_task() ? 2 : 0)
8259
8260 /*
8261@@ -128,10 +140,6 @@ extern int arch_setup_additional_pages(struct linux_binprm *bprm,
8262 (0x7ff >> (PAGE_SHIFT - 12)) : \
8263 (0x3ffff >> (PAGE_SHIFT - 12)))
8264
8265-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
8266-#define arch_randomize_brk arch_randomize_brk
8267-
8268-
8269 #ifdef CONFIG_SPU_BASE
8270 /* Notes used in ET_CORE. Note name is "SPU/<fd>/<filename>". */
8271 #define NT_SPU 1
8272diff --git a/arch/powerpc/include/asm/exec.h b/arch/powerpc/include/asm/exec.h
8273index 8196e9c..d83a9f3 100644
8274--- a/arch/powerpc/include/asm/exec.h
8275+++ b/arch/powerpc/include/asm/exec.h
8276@@ -4,6 +4,6 @@
8277 #ifndef _ASM_POWERPC_EXEC_H
8278 #define _ASM_POWERPC_EXEC_H
8279
8280-extern unsigned long arch_align_stack(unsigned long sp);
8281+#define arch_align_stack(x) ((x) & ~0xfUL)
8282
8283 #endif /* _ASM_POWERPC_EXEC_H */
8284diff --git a/arch/powerpc/include/asm/kmap_types.h b/arch/powerpc/include/asm/kmap_types.h
8285index 5acabbd..7ea14fa 100644
8286--- a/arch/powerpc/include/asm/kmap_types.h
8287+++ b/arch/powerpc/include/asm/kmap_types.h
8288@@ -10,7 +10,7 @@
8289 * 2 of the License, or (at your option) any later version.
8290 */
8291
8292-#define KM_TYPE_NR 16
8293+#define KM_TYPE_NR 17
8294
8295 #endif /* __KERNEL__ */
8296 #endif /* _ASM_POWERPC_KMAP_TYPES_H */
8297diff --git a/arch/powerpc/include/asm/local.h b/arch/powerpc/include/asm/local.h
8298index b8da913..c02b593 100644
8299--- a/arch/powerpc/include/asm/local.h
8300+++ b/arch/powerpc/include/asm/local.h
8301@@ -9,21 +9,65 @@ typedef struct
8302 atomic_long_t a;
8303 } local_t;
8304
8305+typedef struct
8306+{
8307+ atomic_long_unchecked_t a;
8308+} local_unchecked_t;
8309+
8310 #define LOCAL_INIT(i) { ATOMIC_LONG_INIT(i) }
8311
8312 #define local_read(l) atomic_long_read(&(l)->a)
8313+#define local_read_unchecked(l) atomic_long_read_unchecked(&(l)->a)
8314 #define local_set(l,i) atomic_long_set(&(l)->a, (i))
8315+#define local_set_unchecked(l,i) atomic_long_set_unchecked(&(l)->a, (i))
8316
8317 #define local_add(i,l) atomic_long_add((i),(&(l)->a))
8318+#define local_add_unchecked(i,l) atomic_long_add_unchecked((i),(&(l)->a))
8319 #define local_sub(i,l) atomic_long_sub((i),(&(l)->a))
8320+#define local_sub_unchecked(i,l) atomic_long_sub_unchecked((i),(&(l)->a))
8321 #define local_inc(l) atomic_long_inc(&(l)->a)
8322+#define local_inc_unchecked(l) atomic_long_inc_unchecked(&(l)->a)
8323 #define local_dec(l) atomic_long_dec(&(l)->a)
8324+#define local_dec_unchecked(l) atomic_long_dec_unchecked(&(l)->a)
8325
8326 static __inline__ long local_add_return(long a, local_t *l)
8327 {
8328 long t;
8329
8330 __asm__ __volatile__(
8331+"1:" PPC_LLARX(%0,0,%2,0) " # local_add_return\n"
8332+
8333+#ifdef CONFIG_PAX_REFCOUNT
8334+" mcrxr cr0\n"
8335+" addo. %0,%1,%0\n"
8336+" bf 4*cr0+so, 3f\n"
8337+"2:.long " "0x00c00b00""\n"
8338+#else
8339+" add %0,%1,%0\n"
8340+#endif
8341+
8342+"3:\n"
8343+ PPC405_ERR77(0,%2)
8344+ PPC_STLCX "%0,0,%2 \n\
8345+ bne- 1b"
8346+
8347+#ifdef CONFIG_PAX_REFCOUNT
8348+"\n4:\n"
8349+ _ASM_EXTABLE(2b, 4b)
8350+#endif
8351+
8352+ : "=&r" (t)
8353+ : "r" (a), "r" (&(l->a.counter))
8354+ : "cc", "memory");
8355+
8356+ return t;
8357+}
8358+
8359+static __inline__ long local_add_return_unchecked(long a, local_unchecked_t *l)
8360+{
8361+ long t;
8362+
8363+ __asm__ __volatile__(
8364 "1:" PPC_LLARX(%0,0,%2,0) " # local_add_return\n\
8365 add %0,%1,%0\n"
8366 PPC405_ERR77(0,%2)
8367@@ -101,6 +145,8 @@ static __inline__ long local_dec_return(local_t *l)
8368
8369 #define local_cmpxchg(l, o, n) \
8370 (cmpxchg_local(&((l)->a.counter), (o), (n)))
8371+#define local_cmpxchg_unchecked(l, o, n) \
8372+ (cmpxchg_local(&((l)->a.counter), (o), (n)))
8373 #define local_xchg(l, n) (xchg_local(&((l)->a.counter), (n)))
8374
8375 /**
8376diff --git a/arch/powerpc/include/asm/mman.h b/arch/powerpc/include/asm/mman.h
8377index 8565c25..2865190 100644
8378--- a/arch/powerpc/include/asm/mman.h
8379+++ b/arch/powerpc/include/asm/mman.h
8380@@ -24,7 +24,7 @@ static inline unsigned long arch_calc_vm_prot_bits(unsigned long prot)
8381 }
8382 #define arch_calc_vm_prot_bits(prot) arch_calc_vm_prot_bits(prot)
8383
8384-static inline pgprot_t arch_vm_get_page_prot(unsigned long vm_flags)
8385+static inline pgprot_t arch_vm_get_page_prot(vm_flags_t vm_flags)
8386 {
8387 return (vm_flags & VM_SAO) ? __pgprot(_PAGE_SAO) : __pgprot(0);
8388 }
8389diff --git a/arch/powerpc/include/asm/page.h b/arch/powerpc/include/asm/page.h
8390index 69c0598..2c56964 100644
8391--- a/arch/powerpc/include/asm/page.h
8392+++ b/arch/powerpc/include/asm/page.h
8393@@ -227,8 +227,9 @@ extern long long virt_phys_offset;
8394 * and needs to be executable. This means the whole heap ends
8395 * up being executable.
8396 */
8397-#define VM_DATA_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
8398- VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
8399+#define VM_DATA_DEFAULT_FLAGS32 \
8400+ (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
8401+ VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
8402
8403 #define VM_DATA_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
8404 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
8405@@ -256,6 +257,9 @@ extern long long virt_phys_offset;
8406 #define is_kernel_addr(x) ((x) >= PAGE_OFFSET)
8407 #endif
8408
8409+#define ktla_ktva(addr) (addr)
8410+#define ktva_ktla(addr) (addr)
8411+
8412 #ifndef CONFIG_PPC_BOOK3S_64
8413 /*
8414 * Use the top bit of the higher-level page table entries to indicate whether
8415diff --git a/arch/powerpc/include/asm/page_64.h b/arch/powerpc/include/asm/page_64.h
8416index d908a46..3753f71 100644
8417--- a/arch/powerpc/include/asm/page_64.h
8418+++ b/arch/powerpc/include/asm/page_64.h
8419@@ -172,15 +172,18 @@ do { \
8420 * stack by default, so in the absence of a PT_GNU_STACK program header
8421 * we turn execute permission off.
8422 */
8423-#define VM_STACK_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
8424- VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
8425+#define VM_STACK_DEFAULT_FLAGS32 \
8426+ (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
8427+ VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
8428
8429 #define VM_STACK_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
8430 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
8431
8432+#ifndef CONFIG_PAX_PAGEEXEC
8433 #define VM_STACK_DEFAULT_FLAGS \
8434 (is_32bit_task() ? \
8435 VM_STACK_DEFAULT_FLAGS32 : VM_STACK_DEFAULT_FLAGS64)
8436+#endif
8437
8438 #include <asm-generic/getorder.h>
8439
8440diff --git a/arch/powerpc/include/asm/pgalloc-64.h b/arch/powerpc/include/asm/pgalloc-64.h
8441index 4b0be20..c15a27d 100644
8442--- a/arch/powerpc/include/asm/pgalloc-64.h
8443+++ b/arch/powerpc/include/asm/pgalloc-64.h
8444@@ -54,6 +54,7 @@ static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
8445 #ifndef CONFIG_PPC_64K_PAGES
8446
8447 #define pgd_populate(MM, PGD, PUD) pgd_set(PGD, PUD)
8448+#define pgd_populate_kernel(MM, PGD, PUD) pgd_populate((MM), (PGD), (PUD))
8449
8450 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
8451 {
8452@@ -71,6 +72,11 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
8453 pud_set(pud, (unsigned long)pmd);
8454 }
8455
8456+static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
8457+{
8458+ pud_populate(mm, pud, pmd);
8459+}
8460+
8461 #define pmd_populate(mm, pmd, pte_page) \
8462 pmd_populate_kernel(mm, pmd, page_address(pte_page))
8463 #define pmd_populate_kernel(mm, pmd, pte) pmd_set(pmd, (unsigned long)(pte))
8464@@ -173,6 +179,7 @@ extern void __tlb_remove_table(void *_table);
8465 #endif
8466
8467 #define pud_populate(mm, pud, pmd) pud_set(pud, (unsigned long)pmd)
8468+#define pud_populate_kernel(mm, pud, pmd) pud_populate((mm), (pud), (pmd))
8469
8470 static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd,
8471 pte_t *pte)
8472diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h
8473index 9835ac4..900430f 100644
8474--- a/arch/powerpc/include/asm/pgtable.h
8475+++ b/arch/powerpc/include/asm/pgtable.h
8476@@ -2,6 +2,7 @@
8477 #define _ASM_POWERPC_PGTABLE_H
8478 #ifdef __KERNEL__
8479
8480+#include <linux/const.h>
8481 #ifndef __ASSEMBLY__
8482 #include <linux/mmdebug.h>
8483 #include <linux/mmzone.h>
8484diff --git a/arch/powerpc/include/asm/pte-hash32.h b/arch/powerpc/include/asm/pte-hash32.h
8485index 62cfb0c..50c6402 100644
8486--- a/arch/powerpc/include/asm/pte-hash32.h
8487+++ b/arch/powerpc/include/asm/pte-hash32.h
8488@@ -20,6 +20,7 @@
8489 #define _PAGE_HASHPTE 0x002 /* hash_page has made an HPTE for this pte */
8490 #define _PAGE_USER 0x004 /* usermode access allowed */
8491 #define _PAGE_GUARDED 0x008 /* G: prohibit speculative access */
8492+#define _PAGE_EXEC _PAGE_GUARDED
8493 #define _PAGE_COHERENT 0x010 /* M: enforce memory coherence (SMP systems) */
8494 #define _PAGE_NO_CACHE 0x020 /* I: cache inhibit */
8495 #define _PAGE_WRITETHRU 0x040 /* W: cache write-through */
8496diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
8497index af56b5c..f86f3f6 100644
8498--- a/arch/powerpc/include/asm/reg.h
8499+++ b/arch/powerpc/include/asm/reg.h
8500@@ -253,6 +253,7 @@
8501 #define SPRN_DBCR 0x136 /* e300 Data Breakpoint Control Reg */
8502 #define SPRN_DSISR 0x012 /* Data Storage Interrupt Status Register */
8503 #define DSISR_NOHPTE 0x40000000 /* no translation found */
8504+#define DSISR_GUARDED 0x10000000 /* fetch from guarded storage */
8505 #define DSISR_PROTFAULT 0x08000000 /* protection fault */
8506 #define DSISR_ISSTORE 0x02000000 /* access was a store */
8507 #define DSISR_DABRMATCH 0x00400000 /* hit data breakpoint */
8508diff --git a/arch/powerpc/include/asm/smp.h b/arch/powerpc/include/asm/smp.h
8509index d607df5..08dc9ae 100644
8510--- a/arch/powerpc/include/asm/smp.h
8511+++ b/arch/powerpc/include/asm/smp.h
8512@@ -51,7 +51,7 @@ struct smp_ops_t {
8513 int (*cpu_disable)(void);
8514 void (*cpu_die)(unsigned int nr);
8515 int (*cpu_bootable)(unsigned int nr);
8516-};
8517+} __no_const;
8518
8519 extern void smp_send_debugger_break(void);
8520 extern void start_secondary_resume(void);
8521diff --git a/arch/powerpc/include/asm/spinlock.h b/arch/powerpc/include/asm/spinlock.h
8522index 4dbe072..b803275 100644
8523--- a/arch/powerpc/include/asm/spinlock.h
8524+++ b/arch/powerpc/include/asm/spinlock.h
8525@@ -204,13 +204,29 @@ static inline long __arch_read_trylock(arch_rwlock_t *rw)
8526 __asm__ __volatile__(
8527 "1: " PPC_LWARX(%0,0,%1,1) "\n"
8528 __DO_SIGN_EXTEND
8529-" addic. %0,%0,1\n\
8530- ble- 2f\n"
8531+
8532+#ifdef CONFIG_PAX_REFCOUNT
8533+" mcrxr cr0\n"
8534+" addico. %0,%0,1\n"
8535+" bf 4*cr0+so, 3f\n"
8536+"2:.long " "0x00c00b00""\n"
8537+#else
8538+" addic. %0,%0,1\n"
8539+#endif
8540+
8541+"3:\n"
8542+ "ble- 4f\n"
8543 PPC405_ERR77(0,%1)
8544 " stwcx. %0,0,%1\n\
8545 bne- 1b\n"
8546 PPC_ACQUIRE_BARRIER
8547-"2:" : "=&r" (tmp)
8548+"4:"
8549+
8550+#ifdef CONFIG_PAX_REFCOUNT
8551+ _ASM_EXTABLE(2b,4b)
8552+#endif
8553+
8554+ : "=&r" (tmp)
8555 : "r" (&rw->lock)
8556 : "cr0", "xer", "memory");
8557
8558@@ -286,11 +302,27 @@ static inline void arch_read_unlock(arch_rwlock_t *rw)
8559 __asm__ __volatile__(
8560 "# read_unlock\n\t"
8561 PPC_RELEASE_BARRIER
8562-"1: lwarx %0,0,%1\n\
8563- addic %0,%0,-1\n"
8564+"1: lwarx %0,0,%1\n"
8565+
8566+#ifdef CONFIG_PAX_REFCOUNT
8567+" mcrxr cr0\n"
8568+" addico. %0,%0,-1\n"
8569+" bf 4*cr0+so, 3f\n"
8570+"2:.long " "0x00c00b00""\n"
8571+#else
8572+" addic. %0,%0,-1\n"
8573+#endif
8574+
8575+"3:\n"
8576 PPC405_ERR77(0,%1)
8577 " stwcx. %0,0,%1\n\
8578 bne- 1b"
8579+
8580+#ifdef CONFIG_PAX_REFCOUNT
8581+"\n4:\n"
8582+ _ASM_EXTABLE(2b, 4b)
8583+#endif
8584+
8585 : "=&r"(tmp)
8586 : "r"(&rw->lock)
8587 : "cr0", "xer", "memory");
8588diff --git a/arch/powerpc/include/asm/thread_info.h b/arch/powerpc/include/asm/thread_info.h
8589index 7248979..80b75de 100644
8590--- a/arch/powerpc/include/asm/thread_info.h
8591+++ b/arch/powerpc/include/asm/thread_info.h
8592@@ -103,6 +103,8 @@ static inline struct thread_info *current_thread_info(void)
8593 #if defined(CONFIG_PPC64)
8594 #define TIF_ELF2ABI 18 /* function descriptors must die! */
8595 #endif
8596+/* mask must be expressible within 16 bits to satisfy 'andi' instruction reqs */
8597+#define TIF_GRSEC_SETXID 6 /* update credentials on syscall entry/exit */
8598
8599 /* as above, but as bit values */
8600 #define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
8601@@ -121,9 +123,10 @@ static inline struct thread_info *current_thread_info(void)
8602 #define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT)
8603 #define _TIF_EMULATE_STACK_STORE (1<<TIF_EMULATE_STACK_STORE)
8604 #define _TIF_NOHZ (1<<TIF_NOHZ)
8605+#define _TIF_GRSEC_SETXID (1<<TIF_GRSEC_SETXID)
8606 #define _TIF_SYSCALL_DOTRACE (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \
8607 _TIF_SECCOMP | _TIF_SYSCALL_TRACEPOINT | \
8608- _TIF_NOHZ)
8609+ _TIF_NOHZ | _TIF_GRSEC_SETXID)
8610
8611 #define _TIF_USER_WORK_MASK (_TIF_SIGPENDING | _TIF_NEED_RESCHED | \
8612 _TIF_NOTIFY_RESUME | _TIF_UPROBE | \
8613diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h
8614index a0c071d..49cdc7f 100644
8615--- a/arch/powerpc/include/asm/uaccess.h
8616+++ b/arch/powerpc/include/asm/uaccess.h
8617@@ -58,6 +58,7 @@
8618
8619 #endif
8620
8621+#define access_ok_noprefault(type, addr, size) access_ok((type), (addr), (size))
8622 #define access_ok(type, addr, size) \
8623 (__chk_user_ptr(addr), \
8624 __access_ok((__force unsigned long)(addr), (size), get_fs()))
8625@@ -318,52 +319,6 @@ do { \
8626 extern unsigned long __copy_tofrom_user(void __user *to,
8627 const void __user *from, unsigned long size);
8628
8629-#ifndef __powerpc64__
8630-
8631-static inline unsigned long copy_from_user(void *to,
8632- const void __user *from, unsigned long n)
8633-{
8634- unsigned long over;
8635-
8636- if (access_ok(VERIFY_READ, from, n))
8637- return __copy_tofrom_user((__force void __user *)to, from, n);
8638- if ((unsigned long)from < TASK_SIZE) {
8639- over = (unsigned long)from + n - TASK_SIZE;
8640- return __copy_tofrom_user((__force void __user *)to, from,
8641- n - over) + over;
8642- }
8643- return n;
8644-}
8645-
8646-static inline unsigned long copy_to_user(void __user *to,
8647- const void *from, unsigned long n)
8648-{
8649- unsigned long over;
8650-
8651- if (access_ok(VERIFY_WRITE, to, n))
8652- return __copy_tofrom_user(to, (__force void __user *)from, n);
8653- if ((unsigned long)to < TASK_SIZE) {
8654- over = (unsigned long)to + n - TASK_SIZE;
8655- return __copy_tofrom_user(to, (__force void __user *)from,
8656- n - over) + over;
8657- }
8658- return n;
8659-}
8660-
8661-#else /* __powerpc64__ */
8662-
8663-#define __copy_in_user(to, from, size) \
8664- __copy_tofrom_user((to), (from), (size))
8665-
8666-extern unsigned long copy_from_user(void *to, const void __user *from,
8667- unsigned long n);
8668-extern unsigned long copy_to_user(void __user *to, const void *from,
8669- unsigned long n);
8670-extern unsigned long copy_in_user(void __user *to, const void __user *from,
8671- unsigned long n);
8672-
8673-#endif /* __powerpc64__ */
8674-
8675 static inline unsigned long __copy_from_user_inatomic(void *to,
8676 const void __user *from, unsigned long n)
8677 {
8678@@ -387,6 +342,10 @@ static inline unsigned long __copy_from_user_inatomic(void *to,
8679 if (ret == 0)
8680 return 0;
8681 }
8682+
8683+ if (!__builtin_constant_p(n))
8684+ check_object_size(to, n, false);
8685+
8686 return __copy_tofrom_user((__force void __user *)to, from, n);
8687 }
8688
8689@@ -413,6 +372,10 @@ static inline unsigned long __copy_to_user_inatomic(void __user *to,
8690 if (ret == 0)
8691 return 0;
8692 }
8693+
8694+ if (!__builtin_constant_p(n))
8695+ check_object_size(from, n, true);
8696+
8697 return __copy_tofrom_user(to, (__force const void __user *)from, n);
8698 }
8699
8700@@ -430,6 +393,92 @@ static inline unsigned long __copy_to_user(void __user *to,
8701 return __copy_to_user_inatomic(to, from, size);
8702 }
8703
8704+#ifndef __powerpc64__
8705+
8706+static inline unsigned long __must_check copy_from_user(void *to,
8707+ const void __user *from, unsigned long n)
8708+{
8709+ unsigned long over;
8710+
8711+ if ((long)n < 0)
8712+ return n;
8713+
8714+ if (access_ok(VERIFY_READ, from, n)) {
8715+ if (!__builtin_constant_p(n))
8716+ check_object_size(to, n, false);
8717+ return __copy_tofrom_user((__force void __user *)to, from, n);
8718+ }
8719+ if ((unsigned long)from < TASK_SIZE) {
8720+ over = (unsigned long)from + n - TASK_SIZE;
8721+ if (!__builtin_constant_p(n - over))
8722+ check_object_size(to, n - over, false);
8723+ return __copy_tofrom_user((__force void __user *)to, from,
8724+ n - over) + over;
8725+ }
8726+ return n;
8727+}
8728+
8729+static inline unsigned long __must_check copy_to_user(void __user *to,
8730+ const void *from, unsigned long n)
8731+{
8732+ unsigned long over;
8733+
8734+ if ((long)n < 0)
8735+ return n;
8736+
8737+ if (access_ok(VERIFY_WRITE, to, n)) {
8738+ if (!__builtin_constant_p(n))
8739+ check_object_size(from, n, true);
8740+ return __copy_tofrom_user(to, (__force void __user *)from, n);
8741+ }
8742+ if ((unsigned long)to < TASK_SIZE) {
8743+ over = (unsigned long)to + n - TASK_SIZE;
8744+ if (!__builtin_constant_p(n))
8745+ check_object_size(from, n - over, true);
8746+ return __copy_tofrom_user(to, (__force void __user *)from,
8747+ n - over) + over;
8748+ }
8749+ return n;
8750+}
8751+
8752+#else /* __powerpc64__ */
8753+
8754+#define __copy_in_user(to, from, size) \
8755+ __copy_tofrom_user((to), (from), (size))
8756+
8757+static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
8758+{
8759+ if ((long)n < 0 || n > INT_MAX)
8760+ return n;
8761+
8762+ if (!__builtin_constant_p(n))
8763+ check_object_size(to, n, false);
8764+
8765+ if (likely(access_ok(VERIFY_READ, from, n)))
8766+ n = __copy_from_user(to, from, n);
8767+ else
8768+ memset(to, 0, n);
8769+ return n;
8770+}
8771+
8772+static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
8773+{
8774+ if ((long)n < 0 || n > INT_MAX)
8775+ return n;
8776+
8777+ if (likely(access_ok(VERIFY_WRITE, to, n))) {
8778+ if (!__builtin_constant_p(n))
8779+ check_object_size(from, n, true);
8780+ n = __copy_to_user(to, from, n);
8781+ }
8782+ return n;
8783+}
8784+
8785+extern unsigned long copy_in_user(void __user *to, const void __user *from,
8786+ unsigned long n);
8787+
8788+#endif /* __powerpc64__ */
8789+
8790 extern unsigned long __clear_user(void __user *addr, unsigned long size);
8791
8792 static inline unsigned long clear_user(void __user *addr, unsigned long size)
8793diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile
8794index 502cf69..53936a1 100644
8795--- a/arch/powerpc/kernel/Makefile
8796+++ b/arch/powerpc/kernel/Makefile
8797@@ -15,6 +15,11 @@ CFLAGS_prom_init.o += -fPIC
8798 CFLAGS_btext.o += -fPIC
8799 endif
8800
8801+CFLAGS_REMOVE_cputable.o = $(LATENT_ENTROPY_PLUGIN_CFLAGS)
8802+CFLAGS_REMOVE_prom_init.o = $(LATENT_ENTROPY_PLUGIN_CFLAGS)
8803+CFLAGS_REMOVE_btext.o = $(LATENT_ENTROPY_PLUGIN_CFLAGS)
8804+CFLAGS_REMOVE_prom.o = $(LATENT_ENTROPY_PLUGIN_CFLAGS)
8805+
8806 ifdef CONFIG_FUNCTION_TRACER
8807 # Do not trace early boot code
8808 CFLAGS_REMOVE_cputable.o = -pg -mno-sched-epilog
8809@@ -27,6 +32,8 @@ CFLAGS_REMOVE_ftrace.o = -pg -mno-sched-epilog
8810 CFLAGS_REMOVE_time.o = -pg -mno-sched-epilog
8811 endif
8812
8813+CFLAGS_REMOVE_prom_init.o += $(LATENT_ENTROPY_PLUGIN_CFLAGS)
8814+
8815 obj-y := cputable.o ptrace.o syscalls.o \
8816 irq.o align.o signal_32.o pmc.o vdso.o \
8817 process.o systbl.o idle.o \
8818diff --git a/arch/powerpc/kernel/exceptions-64e.S b/arch/powerpc/kernel/exceptions-64e.S
8819index 3e68d1c..72a5ee6 100644
8820--- a/arch/powerpc/kernel/exceptions-64e.S
8821+++ b/arch/powerpc/kernel/exceptions-64e.S
8822@@ -1010,6 +1010,7 @@ storage_fault_common:
8823 std r14,_DAR(r1)
8824 std r15,_DSISR(r1)
8825 addi r3,r1,STACK_FRAME_OVERHEAD
8826+ bl save_nvgprs
8827 mr r4,r14
8828 mr r5,r15
8829 ld r14,PACA_EXGEN+EX_R14(r13)
8830@@ -1018,8 +1019,7 @@ storage_fault_common:
8831 cmpdi r3,0
8832 bne- 1f
8833 b ret_from_except_lite
8834-1: bl save_nvgprs
8835- mr r5,r3
8836+1: mr r5,r3
8837 addi r3,r1,STACK_FRAME_OVERHEAD
8838 ld r4,_DAR(r1)
8839 bl bad_page_fault
8840diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
8841index 9519e6b..13f6c38 100644
8842--- a/arch/powerpc/kernel/exceptions-64s.S
8843+++ b/arch/powerpc/kernel/exceptions-64s.S
8844@@ -1599,10 +1599,10 @@ handle_page_fault:
8845 11: ld r4,_DAR(r1)
8846 ld r5,_DSISR(r1)
8847 addi r3,r1,STACK_FRAME_OVERHEAD
8848+ bl save_nvgprs
8849 bl do_page_fault
8850 cmpdi r3,0
8851 beq+ 12f
8852- bl save_nvgprs
8853 mr r5,r3
8854 addi r3,r1,STACK_FRAME_OVERHEAD
8855 lwz r4,_DAR(r1)
8856diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
8857index 4509603..cdb491f 100644
8858--- a/arch/powerpc/kernel/irq.c
8859+++ b/arch/powerpc/kernel/irq.c
8860@@ -460,6 +460,8 @@ void migrate_irqs(void)
8861 }
8862 #endif
8863
8864+extern void gr_handle_kernel_exploit(void);
8865+
8866 static inline void check_stack_overflow(void)
8867 {
8868 #ifdef CONFIG_DEBUG_STACKOVERFLOW
8869@@ -472,6 +474,7 @@ static inline void check_stack_overflow(void)
8870 pr_err("do_IRQ: stack overflow: %ld\n",
8871 sp - sizeof(struct thread_info));
8872 dump_stack();
8873+ gr_handle_kernel_exploit();
8874 }
8875 #endif
8876 }
8877diff --git a/arch/powerpc/kernel/module_32.c b/arch/powerpc/kernel/module_32.c
8878index c94d2e0..992a9ce 100644
8879--- a/arch/powerpc/kernel/module_32.c
8880+++ b/arch/powerpc/kernel/module_32.c
8881@@ -158,7 +158,7 @@ int module_frob_arch_sections(Elf32_Ehdr *hdr,
8882 me->arch.core_plt_section = i;
8883 }
8884 if (!me->arch.core_plt_section || !me->arch.init_plt_section) {
8885- pr_err("Module doesn't contain .plt or .init.plt sections.\n");
8886+ pr_err("Module %s doesn't contain .plt or .init.plt sections.\n", me->name);
8887 return -ENOEXEC;
8888 }
8889
8890@@ -188,11 +188,16 @@ static uint32_t do_plt_call(void *location,
8891
8892 pr_debug("Doing plt for call to 0x%x at 0x%x\n", val, (unsigned int)location);
8893 /* Init, or core PLT? */
8894- if (location >= mod->module_core
8895- && location < mod->module_core + mod->core_size)
8896+ if ((location >= mod->module_core_rx && location < mod->module_core_rx + mod->core_size_rx) ||
8897+ (location >= mod->module_core_rw && location < mod->module_core_rw + mod->core_size_rw))
8898 entry = (void *)sechdrs[mod->arch.core_plt_section].sh_addr;
8899- else
8900+ else if ((location >= mod->module_init_rx && location < mod->module_init_rx + mod->init_size_rx) ||
8901+ (location >= mod->module_init_rw && location < mod->module_init_rw + mod->init_size_rw))
8902 entry = (void *)sechdrs[mod->arch.init_plt_section].sh_addr;
8903+ else {
8904+ printk(KERN_ERR "%s: invalid R_PPC_REL24 entry found\n", mod->name);
8905+ return ~0UL;
8906+ }
8907
8908 /* Find this entry, or if that fails, the next avail. entry */
8909 while (entry->jump[0]) {
8910@@ -296,7 +301,7 @@ int apply_relocate_add(Elf32_Shdr *sechdrs,
8911 }
8912 #ifdef CONFIG_DYNAMIC_FTRACE
8913 module->arch.tramp =
8914- do_plt_call(module->module_core,
8915+ do_plt_call(module->module_core_rx,
8916 (unsigned long)ftrace_caller,
8917 sechdrs, module);
8918 #endif
8919diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
8920index b4cc7be..1fe8bb3 100644
8921--- a/arch/powerpc/kernel/process.c
8922+++ b/arch/powerpc/kernel/process.c
8923@@ -1036,8 +1036,8 @@ void show_regs(struct pt_regs * regs)
8924 * Lookup NIP late so we have the best change of getting the
8925 * above info out without failing
8926 */
8927- printk("NIP ["REG"] %pS\n", regs->nip, (void *)regs->nip);
8928- printk("LR ["REG"] %pS\n", regs->link, (void *)regs->link);
8929+ printk("NIP ["REG"] %pA\n", regs->nip, (void *)regs->nip);
8930+ printk("LR ["REG"] %pA\n", regs->link, (void *)regs->link);
8931 #endif
8932 show_stack(current, (unsigned long *) regs->gpr[1]);
8933 if (!user_mode(regs))
8934@@ -1549,10 +1549,10 @@ void show_stack(struct task_struct *tsk, unsigned long *stack)
8935 newsp = stack[0];
8936 ip = stack[STACK_FRAME_LR_SAVE];
8937 if (!firstframe || ip != lr) {
8938- printk("["REG"] ["REG"] %pS", sp, ip, (void *)ip);
8939+ printk("["REG"] ["REG"] %pA", sp, ip, (void *)ip);
8940 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
8941 if ((ip == rth) && curr_frame >= 0) {
8942- printk(" (%pS)",
8943+ printk(" (%pA)",
8944 (void *)current->ret_stack[curr_frame].ret);
8945 curr_frame--;
8946 }
8947@@ -1572,7 +1572,7 @@ void show_stack(struct task_struct *tsk, unsigned long *stack)
8948 struct pt_regs *regs = (struct pt_regs *)
8949 (sp + STACK_FRAME_OVERHEAD);
8950 lr = regs->link;
8951- printk("--- interrupt: %lx at %pS\n LR = %pS\n",
8952+ printk("--- interrupt: %lx at %pA\n LR = %pA\n",
8953 regs->trap, (void *)regs->nip, (void *)lr);
8954 firstframe = 1;
8955 }
8956@@ -1608,49 +1608,3 @@ void notrace __ppc64_runlatch_off(void)
8957 mtspr(SPRN_CTRLT, ctrl);
8958 }
8959 #endif /* CONFIG_PPC64 */
8960-
8961-unsigned long arch_align_stack(unsigned long sp)
8962-{
8963- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
8964- sp -= get_random_int() & ~PAGE_MASK;
8965- return sp & ~0xf;
8966-}
8967-
8968-static inline unsigned long brk_rnd(void)
8969-{
8970- unsigned long rnd = 0;
8971-
8972- /* 8MB for 32bit, 1GB for 64bit */
8973- if (is_32bit_task())
8974- rnd = (long)(get_random_int() % (1<<(23-PAGE_SHIFT)));
8975- else
8976- rnd = (long)(get_random_int() % (1<<(30-PAGE_SHIFT)));
8977-
8978- return rnd << PAGE_SHIFT;
8979-}
8980-
8981-unsigned long arch_randomize_brk(struct mm_struct *mm)
8982-{
8983- unsigned long base = mm->brk;
8984- unsigned long ret;
8985-
8986-#ifdef CONFIG_PPC_STD_MMU_64
8987- /*
8988- * If we are using 1TB segments and we are allowed to randomise
8989- * the heap, we can put it above 1TB so it is backed by a 1TB
8990- * segment. Otherwise the heap will be in the bottom 1TB
8991- * which always uses 256MB segments and this may result in a
8992- * performance penalty.
8993- */
8994- if (!is_32bit_task() && (mmu_highuser_ssize == MMU_SEGSIZE_1T))
8995- base = max_t(unsigned long, mm->brk, 1UL << SID_SHIFT_1T);
8996-#endif
8997-
8998- ret = PAGE_ALIGN(base + brk_rnd());
8999-
9000- if (ret < mm->brk)
9001- return mm->brk;
9002-
9003- return ret;
9004-}
9005-
9006diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c
9007index f21897b..28c0428 100644
9008--- a/arch/powerpc/kernel/ptrace.c
9009+++ b/arch/powerpc/kernel/ptrace.c
9010@@ -1762,6 +1762,10 @@ long arch_ptrace(struct task_struct *child, long request,
9011 return ret;
9012 }
9013
9014+#ifdef CONFIG_GRKERNSEC_SETXID
9015+extern void gr_delayed_cred_worker(void);
9016+#endif
9017+
9018 /*
9019 * We must return the syscall number to actually look up in the table.
9020 * This can be -1L to skip running any syscall at all.
9021@@ -1774,6 +1778,11 @@ long do_syscall_trace_enter(struct pt_regs *regs)
9022
9023 secure_computing_strict(regs->gpr[0]);
9024
9025+#ifdef CONFIG_GRKERNSEC_SETXID
9026+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
9027+ gr_delayed_cred_worker();
9028+#endif
9029+
9030 if (test_thread_flag(TIF_SYSCALL_TRACE) &&
9031 tracehook_report_syscall_entry(regs))
9032 /*
9033@@ -1805,6 +1814,11 @@ void do_syscall_trace_leave(struct pt_regs *regs)
9034 {
9035 int step;
9036
9037+#ifdef CONFIG_GRKERNSEC_SETXID
9038+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
9039+ gr_delayed_cred_worker();
9040+#endif
9041+
9042 audit_syscall_exit(regs);
9043
9044 if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
9045diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c
9046index d3a831a..3a33123 100644
9047--- a/arch/powerpc/kernel/signal_32.c
9048+++ b/arch/powerpc/kernel/signal_32.c
9049@@ -1011,7 +1011,7 @@ int handle_rt_signal32(struct ksignal *ksig, sigset_t *oldset,
9050 /* Save user registers on the stack */
9051 frame = &rt_sf->uc.uc_mcontext;
9052 addr = frame;
9053- if (vdso32_rt_sigtramp && current->mm->context.vdso_base) {
9054+ if (vdso32_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
9055 sigret = 0;
9056 tramp = current->mm->context.vdso_base + vdso32_rt_sigtramp;
9057 } else {
9058diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c
9059index c7c24d2..1bf7039 100644
9060--- a/arch/powerpc/kernel/signal_64.c
9061+++ b/arch/powerpc/kernel/signal_64.c
9062@@ -754,7 +754,7 @@ int handle_rt_signal64(struct ksignal *ksig, sigset_t *set, struct pt_regs *regs
9063 current->thread.fp_state.fpscr = 0;
9064
9065 /* Set up to return from userspace. */
9066- if (vdso64_rt_sigtramp && current->mm->context.vdso_base) {
9067+ if (vdso64_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
9068 regs->link = current->mm->context.vdso_base + vdso64_rt_sigtramp;
9069 } else {
9070 err |= setup_trampoline(__NR_rt_sigreturn, &frame->tramp[0]);
9071diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
9072index 19e4744..28a8d7b 100644
9073--- a/arch/powerpc/kernel/traps.c
9074+++ b/arch/powerpc/kernel/traps.c
9075@@ -36,6 +36,7 @@
9076 #include <linux/debugfs.h>
9077 #include <linux/ratelimit.h>
9078 #include <linux/context_tracking.h>
9079+#include <linux/uaccess.h>
9080
9081 #include <asm/emulated_ops.h>
9082 #include <asm/pgtable.h>
9083@@ -142,6 +143,8 @@ static unsigned __kprobes long oops_begin(struct pt_regs *regs)
9084 return flags;
9085 }
9086
9087+extern void gr_handle_kernel_exploit(void);
9088+
9089 static void __kprobes oops_end(unsigned long flags, struct pt_regs *regs,
9090 int signr)
9091 {
9092@@ -191,6 +194,9 @@ static void __kprobes oops_end(unsigned long flags, struct pt_regs *regs,
9093 panic("Fatal exception in interrupt");
9094 if (panic_on_oops)
9095 panic("Fatal exception");
9096+
9097+ gr_handle_kernel_exploit();
9098+
9099 do_exit(signr);
9100 }
9101
9102@@ -1137,6 +1143,26 @@ void __kprobes program_check_exception(struct pt_regs *regs)
9103 enum ctx_state prev_state = exception_enter();
9104 unsigned int reason = get_reason(regs);
9105
9106+#ifdef CONFIG_PAX_REFCOUNT
9107+ unsigned int bkpt;
9108+ const struct exception_table_entry *entry;
9109+
9110+ if (reason & REASON_ILLEGAL) {
9111+ /* Check if PaX bad instruction */
9112+ if (!probe_kernel_address(regs->nip, bkpt) && bkpt == 0xc00b00) {
9113+ current->thread.trap_nr = 0;
9114+ pax_report_refcount_overflow(regs);
9115+ /* fixup_exception() for PowerPC does not exist, simulate its job */
9116+ if ((entry = search_exception_tables(regs->nip)) != NULL) {
9117+ regs->nip = entry->fixup;
9118+ return;
9119+ }
9120+ /* fixup_exception() could not handle */
9121+ goto bail;
9122+ }
9123+ }
9124+#endif
9125+
9126 /* We can now get here via a FP Unavailable exception if the core
9127 * has no FPU, in that case the reason flags will be 0 */
9128
9129diff --git a/arch/powerpc/kernel/vdso.c b/arch/powerpc/kernel/vdso.c
9130index 305eb0d..accc5b40 100644
9131--- a/arch/powerpc/kernel/vdso.c
9132+++ b/arch/powerpc/kernel/vdso.c
9133@@ -34,6 +34,7 @@
9134 #include <asm/vdso.h>
9135 #include <asm/vdso_datapage.h>
9136 #include <asm/setup.h>
9137+#include <asm/mman.h>
9138
9139 #undef DEBUG
9140
9141@@ -220,7 +221,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
9142 vdso_base = VDSO32_MBASE;
9143 #endif
9144
9145- current->mm->context.vdso_base = 0;
9146+ current->mm->context.vdso_base = ~0UL;
9147
9148 /* vDSO has a problem and was disabled, just don't "enable" it for the
9149 * process
9150@@ -240,7 +241,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
9151 vdso_base = get_unmapped_area(NULL, vdso_base,
9152 (vdso_pages << PAGE_SHIFT) +
9153 ((VDSO_ALIGNMENT - 1) & PAGE_MASK),
9154- 0, 0);
9155+ 0, MAP_PRIVATE | MAP_EXECUTABLE);
9156 if (IS_ERR_VALUE(vdso_base)) {
9157 rc = vdso_base;
9158 goto fail_mmapsem;
9159diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
9160index 27c0fac..6ec4a32 100644
9161--- a/arch/powerpc/kvm/powerpc.c
9162+++ b/arch/powerpc/kvm/powerpc.c
9163@@ -1402,7 +1402,7 @@ void kvmppc_init_lpid(unsigned long nr_lpids_param)
9164 }
9165 EXPORT_SYMBOL_GPL(kvmppc_init_lpid);
9166
9167-int kvm_arch_init(void *opaque)
9168+int kvm_arch_init(const void *opaque)
9169 {
9170 return 0;
9171 }
9172diff --git a/arch/powerpc/lib/usercopy_64.c b/arch/powerpc/lib/usercopy_64.c
9173index 5eea6f3..5d10396 100644
9174--- a/arch/powerpc/lib/usercopy_64.c
9175+++ b/arch/powerpc/lib/usercopy_64.c
9176@@ -9,22 +9,6 @@
9177 #include <linux/module.h>
9178 #include <asm/uaccess.h>
9179
9180-unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
9181-{
9182- if (likely(access_ok(VERIFY_READ, from, n)))
9183- n = __copy_from_user(to, from, n);
9184- else
9185- memset(to, 0, n);
9186- return n;
9187-}
9188-
9189-unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
9190-{
9191- if (likely(access_ok(VERIFY_WRITE, to, n)))
9192- n = __copy_to_user(to, from, n);
9193- return n;
9194-}
9195-
9196 unsigned long copy_in_user(void __user *to, const void __user *from,
9197 unsigned long n)
9198 {
9199@@ -35,7 +19,5 @@ unsigned long copy_in_user(void __user *to, const void __user *from,
9200 return n;
9201 }
9202
9203-EXPORT_SYMBOL(copy_from_user);
9204-EXPORT_SYMBOL(copy_to_user);
9205 EXPORT_SYMBOL(copy_in_user);
9206
9207diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
9208index b396868..3eb6b9f 100644
9209--- a/arch/powerpc/mm/fault.c
9210+++ b/arch/powerpc/mm/fault.c
9211@@ -33,6 +33,10 @@
9212 #include <linux/ratelimit.h>
9213 #include <linux/context_tracking.h>
9214 #include <linux/hugetlb.h>
9215+#include <linux/slab.h>
9216+#include <linux/pagemap.h>
9217+#include <linux/compiler.h>
9218+#include <linux/unistd.h>
9219
9220 #include <asm/firmware.h>
9221 #include <asm/page.h>
9222@@ -68,6 +72,33 @@ static inline int notify_page_fault(struct pt_regs *regs)
9223 }
9224 #endif
9225
9226+#ifdef CONFIG_PAX_PAGEEXEC
9227+/*
9228+ * PaX: decide what to do with offenders (regs->nip = fault address)
9229+ *
9230+ * returns 1 when task should be killed
9231+ */
9232+static int pax_handle_fetch_fault(struct pt_regs *regs)
9233+{
9234+ return 1;
9235+}
9236+
9237+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
9238+{
9239+ unsigned long i;
9240+
9241+ printk(KERN_ERR "PAX: bytes at PC: ");
9242+ for (i = 0; i < 5; i++) {
9243+ unsigned int c;
9244+ if (get_user(c, (unsigned int __user *)pc+i))
9245+ printk(KERN_CONT "???????? ");
9246+ else
9247+ printk(KERN_CONT "%08x ", c);
9248+ }
9249+ printk("\n");
9250+}
9251+#endif
9252+
9253 /*
9254 * Check whether the instruction at regs->nip is a store using
9255 * an update addressing form which will update r1.
9256@@ -227,7 +258,7 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address,
9257 * indicate errors in DSISR but can validly be set in SRR1.
9258 */
9259 if (trap == 0x400)
9260- error_code &= 0x48200000;
9261+ error_code &= 0x58200000;
9262 else
9263 is_write = error_code & DSISR_ISSTORE;
9264 #else
9265@@ -383,12 +414,16 @@ good_area:
9266 * "undefined". Of those that can be set, this is the only
9267 * one which seems bad.
9268 */
9269- if (error_code & 0x10000000)
9270+ if (error_code & DSISR_GUARDED)
9271 /* Guarded storage error. */
9272 goto bad_area;
9273 #endif /* CONFIG_8xx */
9274
9275 if (is_exec) {
9276+#ifdef CONFIG_PPC_STD_MMU
9277+ if (error_code & DSISR_GUARDED)
9278+ goto bad_area;
9279+#endif
9280 /*
9281 * Allow execution from readable areas if the MMU does not
9282 * provide separate controls over reading and executing.
9283@@ -483,6 +518,23 @@ bad_area:
9284 bad_area_nosemaphore:
9285 /* User mode accesses cause a SIGSEGV */
9286 if (user_mode(regs)) {
9287+
9288+#ifdef CONFIG_PAX_PAGEEXEC
9289+ if (mm->pax_flags & MF_PAX_PAGEEXEC) {
9290+#ifdef CONFIG_PPC_STD_MMU
9291+ if (is_exec && (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))) {
9292+#else
9293+ if (is_exec && regs->nip == address) {
9294+#endif
9295+ switch (pax_handle_fetch_fault(regs)) {
9296+ }
9297+
9298+ pax_report_fault(regs, (void *)regs->nip, (void *)regs->gpr[PT_R1]);
9299+ do_group_exit(SIGKILL);
9300+ }
9301+ }
9302+#endif
9303+
9304 _exception(SIGSEGV, regs, code, address);
9305 goto bail;
9306 }
9307diff --git a/arch/powerpc/mm/mmap.c b/arch/powerpc/mm/mmap.c
9308index cb8bdbe..cde4bc7 100644
9309--- a/arch/powerpc/mm/mmap.c
9310+++ b/arch/powerpc/mm/mmap.c
9311@@ -53,10 +53,14 @@ static inline int mmap_is_legacy(void)
9312 return sysctl_legacy_va_layout;
9313 }
9314
9315-static unsigned long mmap_rnd(void)
9316+static unsigned long mmap_rnd(struct mm_struct *mm)
9317 {
9318 unsigned long rnd = 0;
9319
9320+#ifdef CONFIG_PAX_RANDMMAP
9321+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
9322+#endif
9323+
9324 if (current->flags & PF_RANDOMIZE) {
9325 /* 8MB for 32bit, 1GB for 64bit */
9326 if (is_32bit_task())
9327@@ -67,7 +71,7 @@ static unsigned long mmap_rnd(void)
9328 return rnd << PAGE_SHIFT;
9329 }
9330
9331-static inline unsigned long mmap_base(void)
9332+static inline unsigned long mmap_base(struct mm_struct *mm)
9333 {
9334 unsigned long gap = rlimit(RLIMIT_STACK);
9335
9336@@ -76,7 +80,7 @@ static inline unsigned long mmap_base(void)
9337 else if (gap > MAX_GAP)
9338 gap = MAX_GAP;
9339
9340- return PAGE_ALIGN(TASK_SIZE - gap - mmap_rnd());
9341+ return PAGE_ALIGN(TASK_SIZE - gap - mmap_rnd(mm));
9342 }
9343
9344 /*
9345@@ -91,9 +95,21 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
9346 */
9347 if (mmap_is_legacy()) {
9348 mm->mmap_base = TASK_UNMAPPED_BASE;
9349+
9350+#ifdef CONFIG_PAX_RANDMMAP
9351+ if (mm->pax_flags & MF_PAX_RANDMMAP)
9352+ mm->mmap_base += mm->delta_mmap;
9353+#endif
9354+
9355 mm->get_unmapped_area = arch_get_unmapped_area;
9356 } else {
9357- mm->mmap_base = mmap_base();
9358+ mm->mmap_base = mmap_base(mm);
9359+
9360+#ifdef CONFIG_PAX_RANDMMAP
9361+ if (mm->pax_flags & MF_PAX_RANDMMAP)
9362+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
9363+#endif
9364+
9365 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
9366 }
9367 }
9368diff --git a/arch/powerpc/mm/slice.c b/arch/powerpc/mm/slice.c
9369index 0f432a7..abfe841 100644
9370--- a/arch/powerpc/mm/slice.c
9371+++ b/arch/powerpc/mm/slice.c
9372@@ -105,7 +105,7 @@ static int slice_area_is_free(struct mm_struct *mm, unsigned long addr,
9373 if ((mm->task_size - len) < addr)
9374 return 0;
9375 vma = find_vma(mm, addr);
9376- return (!vma || (addr + len) <= vma->vm_start);
9377+ return check_heap_stack_gap(vma, addr, len, 0);
9378 }
9379
9380 static int slice_low_has_vma(struct mm_struct *mm, unsigned long slice)
9381@@ -277,6 +277,12 @@ static unsigned long slice_find_area_bottomup(struct mm_struct *mm,
9382 info.align_offset = 0;
9383
9384 addr = TASK_UNMAPPED_BASE;
9385+
9386+#ifdef CONFIG_PAX_RANDMMAP
9387+ if (mm->pax_flags & MF_PAX_RANDMMAP)
9388+ addr += mm->delta_mmap;
9389+#endif
9390+
9391 while (addr < TASK_SIZE) {
9392 info.low_limit = addr;
9393 if (!slice_scan_available(addr, available, 1, &addr))
9394@@ -410,6 +416,11 @@ unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len,
9395 if (fixed && addr > (mm->task_size - len))
9396 return -ENOMEM;
9397
9398+#ifdef CONFIG_PAX_RANDMMAP
9399+ if (!fixed && (mm->pax_flags & MF_PAX_RANDMMAP))
9400+ addr = 0;
9401+#endif
9402+
9403 /* If hint, make sure it matches our alignment restrictions */
9404 if (!fixed && addr) {
9405 addr = _ALIGN_UP(addr, 1ul << pshift);
9406diff --git a/arch/powerpc/platforms/cell/spufs/file.c b/arch/powerpc/platforms/cell/spufs/file.c
9407index d966bbe..372124a 100644
9408--- a/arch/powerpc/platforms/cell/spufs/file.c
9409+++ b/arch/powerpc/platforms/cell/spufs/file.c
9410@@ -280,9 +280,9 @@ spufs_mem_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
9411 return VM_FAULT_NOPAGE;
9412 }
9413
9414-static int spufs_mem_mmap_access(struct vm_area_struct *vma,
9415+static ssize_t spufs_mem_mmap_access(struct vm_area_struct *vma,
9416 unsigned long address,
9417- void *buf, int len, int write)
9418+ void *buf, size_t len, int write)
9419 {
9420 struct spu_context *ctx = vma->vm_file->private_data;
9421 unsigned long offset = address - vma->vm_start;
9422diff --git a/arch/s390/include/asm/atomic.h b/arch/s390/include/asm/atomic.h
9423index fa934fe..c296056 100644
9424--- a/arch/s390/include/asm/atomic.h
9425+++ b/arch/s390/include/asm/atomic.h
9426@@ -412,4 +412,14 @@ static inline long long atomic64_dec_if_positive(atomic64_t *v)
9427 #define atomic64_dec_and_test(_v) (atomic64_sub_return(1, _v) == 0)
9428 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
9429
9430+#define atomic64_read_unchecked(v) atomic64_read(v)
9431+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
9432+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
9433+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
9434+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
9435+#define atomic64_inc_unchecked(v) atomic64_inc(v)
9436+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
9437+#define atomic64_dec_unchecked(v) atomic64_dec(v)
9438+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
9439+
9440 #endif /* __ARCH_S390_ATOMIC__ */
9441diff --git a/arch/s390/include/asm/barrier.h b/arch/s390/include/asm/barrier.h
9442index 8d72471..5322500 100644
9443--- a/arch/s390/include/asm/barrier.h
9444+++ b/arch/s390/include/asm/barrier.h
9445@@ -42,7 +42,7 @@
9446 do { \
9447 compiletime_assert_atomic_type(*p); \
9448 barrier(); \
9449- ACCESS_ONCE(*p) = (v); \
9450+ ACCESS_ONCE_RW(*p) = (v); \
9451 } while (0)
9452
9453 #define smp_load_acquire(p) \
9454diff --git a/arch/s390/include/asm/cache.h b/arch/s390/include/asm/cache.h
9455index 4d7ccac..d03d0ad 100644
9456--- a/arch/s390/include/asm/cache.h
9457+++ b/arch/s390/include/asm/cache.h
9458@@ -9,8 +9,10 @@
9459 #ifndef __ARCH_S390_CACHE_H
9460 #define __ARCH_S390_CACHE_H
9461
9462-#define L1_CACHE_BYTES 256
9463+#include <linux/const.h>
9464+
9465 #define L1_CACHE_SHIFT 8
9466+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
9467 #define NET_SKB_PAD 32
9468
9469 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
9470diff --git a/arch/s390/include/asm/elf.h b/arch/s390/include/asm/elf.h
9471index c9c875d..b4b0e4c 100644
9472--- a/arch/s390/include/asm/elf.h
9473+++ b/arch/s390/include/asm/elf.h
9474@@ -163,8 +163,14 @@ extern unsigned int vdso_enabled;
9475 the loader. We need to make sure that it is out of the way of the program
9476 that it will "exec", and that there is sufficient room for the brk. */
9477
9478-extern unsigned long randomize_et_dyn(void);
9479-#define ELF_ET_DYN_BASE randomize_et_dyn()
9480+#define ELF_ET_DYN_BASE (STACK_TOP / 3 * 2)
9481+
9482+#ifdef CONFIG_PAX_ASLR
9483+#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_31BIT) ? 0x10000UL : 0x80000000UL)
9484+
9485+#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26)
9486+#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26)
9487+#endif
9488
9489 /* This yields a mask that user programs can use to figure out what
9490 instruction set this CPU supports. */
9491@@ -225,9 +231,6 @@ struct linux_binprm;
9492 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
9493 int arch_setup_additional_pages(struct linux_binprm *, int);
9494
9495-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
9496-#define arch_randomize_brk arch_randomize_brk
9497-
9498 void *fill_cpu_elf_notes(void *ptr, struct save_area *sa, __vector128 *vxrs);
9499
9500 #endif
9501diff --git a/arch/s390/include/asm/exec.h b/arch/s390/include/asm/exec.h
9502index c4a93d6..4d2a9b4 100644
9503--- a/arch/s390/include/asm/exec.h
9504+++ b/arch/s390/include/asm/exec.h
9505@@ -7,6 +7,6 @@
9506 #ifndef __ASM_EXEC_H
9507 #define __ASM_EXEC_H
9508
9509-extern unsigned long arch_align_stack(unsigned long sp);
9510+#define arch_align_stack(x) ((x) & ~0xfUL)
9511
9512 #endif /* __ASM_EXEC_H */
9513diff --git a/arch/s390/include/asm/uaccess.h b/arch/s390/include/asm/uaccess.h
9514index cd4c68e..6764641 100644
9515--- a/arch/s390/include/asm/uaccess.h
9516+++ b/arch/s390/include/asm/uaccess.h
9517@@ -59,6 +59,7 @@ static inline int __range_ok(unsigned long addr, unsigned long size)
9518 __range_ok((unsigned long)(addr), (size)); \
9519 })
9520
9521+#define access_ok_noprefault(type, addr, size) access_ok((type), (addr), (size))
9522 #define access_ok(type, addr, size) __access_ok(addr, size)
9523
9524 /*
9525@@ -275,6 +276,10 @@ static inline unsigned long __must_check
9526 copy_to_user(void __user *to, const void *from, unsigned long n)
9527 {
9528 might_fault();
9529+
9530+ if ((long)n < 0)
9531+ return n;
9532+
9533 return __copy_to_user(to, from, n);
9534 }
9535
9536@@ -303,10 +308,14 @@ __compiletime_warning("copy_from_user() buffer size is not provably correct")
9537 static inline unsigned long __must_check
9538 copy_from_user(void *to, const void __user *from, unsigned long n)
9539 {
9540- unsigned int sz = __compiletime_object_size(to);
9541+ size_t sz = __compiletime_object_size(to);
9542
9543 might_fault();
9544- if (unlikely(sz != -1 && sz < n)) {
9545+
9546+ if ((long)n < 0)
9547+ return n;
9548+
9549+ if (unlikely(sz != (size_t)-1 && sz < n)) {
9550 copy_from_user_overflow();
9551 return n;
9552 }
9553diff --git a/arch/s390/kernel/module.c b/arch/s390/kernel/module.c
9554index 2ca9586..55682a9 100644
9555--- a/arch/s390/kernel/module.c
9556+++ b/arch/s390/kernel/module.c
9557@@ -165,11 +165,11 @@ int module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs,
9558
9559 /* Increase core size by size of got & plt and set start
9560 offsets for got and plt. */
9561- me->core_size = ALIGN(me->core_size, 4);
9562- me->arch.got_offset = me->core_size;
9563- me->core_size += me->arch.got_size;
9564- me->arch.plt_offset = me->core_size;
9565- me->core_size += me->arch.plt_size;
9566+ me->core_size_rw = ALIGN(me->core_size_rw, 4);
9567+ me->arch.got_offset = me->core_size_rw;
9568+ me->core_size_rw += me->arch.got_size;
9569+ me->arch.plt_offset = me->core_size_rx;
9570+ me->core_size_rx += me->arch.plt_size;
9571 return 0;
9572 }
9573
9574@@ -285,7 +285,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
9575 if (info->got_initialized == 0) {
9576 Elf_Addr *gotent;
9577
9578- gotent = me->module_core + me->arch.got_offset +
9579+ gotent = me->module_core_rw + me->arch.got_offset +
9580 info->got_offset;
9581 *gotent = val;
9582 info->got_initialized = 1;
9583@@ -308,7 +308,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
9584 rc = apply_rela_bits(loc, val, 0, 64, 0);
9585 else if (r_type == R_390_GOTENT ||
9586 r_type == R_390_GOTPLTENT) {
9587- val += (Elf_Addr) me->module_core - loc;
9588+ val += (Elf_Addr) me->module_core_rw - loc;
9589 rc = apply_rela_bits(loc, val, 1, 32, 1);
9590 }
9591 break;
9592@@ -321,7 +321,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
9593 case R_390_PLTOFF64: /* 16 bit offset from GOT to PLT. */
9594 if (info->plt_initialized == 0) {
9595 unsigned int *ip;
9596- ip = me->module_core + me->arch.plt_offset +
9597+ ip = me->module_core_rx + me->arch.plt_offset +
9598 info->plt_offset;
9599 #ifndef CONFIG_64BIT
9600 ip[0] = 0x0d105810; /* basr 1,0; l 1,6(1); br 1 */
9601@@ -346,7 +346,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
9602 val - loc + 0xffffUL < 0x1ffffeUL) ||
9603 (r_type == R_390_PLT32DBL &&
9604 val - loc + 0xffffffffULL < 0x1fffffffeULL)))
9605- val = (Elf_Addr) me->module_core +
9606+ val = (Elf_Addr) me->module_core_rx +
9607 me->arch.plt_offset +
9608 info->plt_offset;
9609 val += rela->r_addend - loc;
9610@@ -368,7 +368,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
9611 case R_390_GOTOFF32: /* 32 bit offset to GOT. */
9612 case R_390_GOTOFF64: /* 64 bit offset to GOT. */
9613 val = val + rela->r_addend -
9614- ((Elf_Addr) me->module_core + me->arch.got_offset);
9615+ ((Elf_Addr) me->module_core_rw + me->arch.got_offset);
9616 if (r_type == R_390_GOTOFF16)
9617 rc = apply_rela_bits(loc, val, 0, 16, 0);
9618 else if (r_type == R_390_GOTOFF32)
9619@@ -378,7 +378,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
9620 break;
9621 case R_390_GOTPC: /* 32 bit PC relative offset to GOT. */
9622 case R_390_GOTPCDBL: /* 32 bit PC rel. off. to GOT shifted by 1. */
9623- val = (Elf_Addr) me->module_core + me->arch.got_offset +
9624+ val = (Elf_Addr) me->module_core_rw + me->arch.got_offset +
9625 rela->r_addend - loc;
9626 if (r_type == R_390_GOTPC)
9627 rc = apply_rela_bits(loc, val, 1, 32, 0);
9628diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c
9629index 13fc097..84d375f 100644
9630--- a/arch/s390/kernel/process.c
9631+++ b/arch/s390/kernel/process.c
9632@@ -227,27 +227,3 @@ unsigned long get_wchan(struct task_struct *p)
9633 }
9634 return 0;
9635 }
9636-
9637-unsigned long arch_align_stack(unsigned long sp)
9638-{
9639- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
9640- sp -= get_random_int() & ~PAGE_MASK;
9641- return sp & ~0xf;
9642-}
9643-
9644-static inline unsigned long brk_rnd(void)
9645-{
9646- /* 8MB for 32bit, 1GB for 64bit */
9647- if (is_32bit_task())
9648- return (get_random_int() & 0x7ffUL) << PAGE_SHIFT;
9649- else
9650- return (get_random_int() & 0x3ffffUL) << PAGE_SHIFT;
9651-}
9652-
9653-unsigned long arch_randomize_brk(struct mm_struct *mm)
9654-{
9655- unsigned long ret;
9656-
9657- ret = PAGE_ALIGN(mm->brk + brk_rnd());
9658- return (ret > mm->brk) ? ret : mm->brk;
9659-}
9660diff --git a/arch/s390/mm/mmap.c b/arch/s390/mm/mmap.c
9661index 179a2c2..371e85c 100644
9662--- a/arch/s390/mm/mmap.c
9663+++ b/arch/s390/mm/mmap.c
9664@@ -204,9 +204,21 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
9665 */
9666 if (mmap_is_legacy()) {
9667 mm->mmap_base = mmap_base_legacy();
9668+
9669+#ifdef CONFIG_PAX_RANDMMAP
9670+ if (mm->pax_flags & MF_PAX_RANDMMAP)
9671+ mm->mmap_base += mm->delta_mmap;
9672+#endif
9673+
9674 mm->get_unmapped_area = arch_get_unmapped_area;
9675 } else {
9676 mm->mmap_base = mmap_base();
9677+
9678+#ifdef CONFIG_PAX_RANDMMAP
9679+ if (mm->pax_flags & MF_PAX_RANDMMAP)
9680+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
9681+#endif
9682+
9683 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
9684 }
9685 }
9686@@ -279,9 +291,21 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
9687 */
9688 if (mmap_is_legacy()) {
9689 mm->mmap_base = mmap_base_legacy();
9690+
9691+#ifdef CONFIG_PAX_RANDMMAP
9692+ if (mm->pax_flags & MF_PAX_RANDMMAP)
9693+ mm->mmap_base += mm->delta_mmap;
9694+#endif
9695+
9696 mm->get_unmapped_area = s390_get_unmapped_area;
9697 } else {
9698 mm->mmap_base = mmap_base();
9699+
9700+#ifdef CONFIG_PAX_RANDMMAP
9701+ if (mm->pax_flags & MF_PAX_RANDMMAP)
9702+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
9703+#endif
9704+
9705 mm->get_unmapped_area = s390_get_unmapped_area_topdown;
9706 }
9707 }
9708diff --git a/arch/score/include/asm/cache.h b/arch/score/include/asm/cache.h
9709index ae3d59f..f65f075 100644
9710--- a/arch/score/include/asm/cache.h
9711+++ b/arch/score/include/asm/cache.h
9712@@ -1,7 +1,9 @@
9713 #ifndef _ASM_SCORE_CACHE_H
9714 #define _ASM_SCORE_CACHE_H
9715
9716+#include <linux/const.h>
9717+
9718 #define L1_CACHE_SHIFT 4
9719-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
9720+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
9721
9722 #endif /* _ASM_SCORE_CACHE_H */
9723diff --git a/arch/score/include/asm/exec.h b/arch/score/include/asm/exec.h
9724index f9f3cd5..58ff438 100644
9725--- a/arch/score/include/asm/exec.h
9726+++ b/arch/score/include/asm/exec.h
9727@@ -1,6 +1,6 @@
9728 #ifndef _ASM_SCORE_EXEC_H
9729 #define _ASM_SCORE_EXEC_H
9730
9731-extern unsigned long arch_align_stack(unsigned long sp);
9732+#define arch_align_stack(x) (x)
9733
9734 #endif /* _ASM_SCORE_EXEC_H */
9735diff --git a/arch/score/kernel/process.c b/arch/score/kernel/process.c
9736index a1519ad3..e8ac1ff 100644
9737--- a/arch/score/kernel/process.c
9738+++ b/arch/score/kernel/process.c
9739@@ -116,8 +116,3 @@ unsigned long get_wchan(struct task_struct *task)
9740
9741 return task_pt_regs(task)->cp0_epc;
9742 }
9743-
9744-unsigned long arch_align_stack(unsigned long sp)
9745-{
9746- return sp;
9747-}
9748diff --git a/arch/sh/include/asm/cache.h b/arch/sh/include/asm/cache.h
9749index ef9e555..331bd29 100644
9750--- a/arch/sh/include/asm/cache.h
9751+++ b/arch/sh/include/asm/cache.h
9752@@ -9,10 +9,11 @@
9753 #define __ASM_SH_CACHE_H
9754 #ifdef __KERNEL__
9755
9756+#include <linux/const.h>
9757 #include <linux/init.h>
9758 #include <cpu/cache.h>
9759
9760-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
9761+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
9762
9763 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
9764
9765diff --git a/arch/sh/mm/mmap.c b/arch/sh/mm/mmap.c
9766index 6777177..cb5e44f 100644
9767--- a/arch/sh/mm/mmap.c
9768+++ b/arch/sh/mm/mmap.c
9769@@ -36,6 +36,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
9770 struct mm_struct *mm = current->mm;
9771 struct vm_area_struct *vma;
9772 int do_colour_align;
9773+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
9774 struct vm_unmapped_area_info info;
9775
9776 if (flags & MAP_FIXED) {
9777@@ -55,6 +56,10 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
9778 if (filp || (flags & MAP_SHARED))
9779 do_colour_align = 1;
9780
9781+#ifdef CONFIG_PAX_RANDMMAP
9782+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
9783+#endif
9784+
9785 if (addr) {
9786 if (do_colour_align)
9787 addr = COLOUR_ALIGN(addr, pgoff);
9788@@ -62,14 +67,13 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
9789 addr = PAGE_ALIGN(addr);
9790
9791 vma = find_vma(mm, addr);
9792- if (TASK_SIZE - len >= addr &&
9793- (!vma || addr + len <= vma->vm_start))
9794+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
9795 return addr;
9796 }
9797
9798 info.flags = 0;
9799 info.length = len;
9800- info.low_limit = TASK_UNMAPPED_BASE;
9801+ info.low_limit = mm->mmap_base;
9802 info.high_limit = TASK_SIZE;
9803 info.align_mask = do_colour_align ? (PAGE_MASK & shm_align_mask) : 0;
9804 info.align_offset = pgoff << PAGE_SHIFT;
9805@@ -85,6 +89,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
9806 struct mm_struct *mm = current->mm;
9807 unsigned long addr = addr0;
9808 int do_colour_align;
9809+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
9810 struct vm_unmapped_area_info info;
9811
9812 if (flags & MAP_FIXED) {
9813@@ -104,6 +109,10 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
9814 if (filp || (flags & MAP_SHARED))
9815 do_colour_align = 1;
9816
9817+#ifdef CONFIG_PAX_RANDMMAP
9818+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
9819+#endif
9820+
9821 /* requesting a specific address */
9822 if (addr) {
9823 if (do_colour_align)
9824@@ -112,8 +121,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
9825 addr = PAGE_ALIGN(addr);
9826
9827 vma = find_vma(mm, addr);
9828- if (TASK_SIZE - len >= addr &&
9829- (!vma || addr + len <= vma->vm_start))
9830+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
9831 return addr;
9832 }
9833
9834@@ -135,6 +143,12 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
9835 VM_BUG_ON(addr != -ENOMEM);
9836 info.flags = 0;
9837 info.low_limit = TASK_UNMAPPED_BASE;
9838+
9839+#ifdef CONFIG_PAX_RANDMMAP
9840+ if (mm->pax_flags & MF_PAX_RANDMMAP)
9841+ info.low_limit += mm->delta_mmap;
9842+#endif
9843+
9844 info.high_limit = TASK_SIZE;
9845 addr = vm_unmapped_area(&info);
9846 }
9847diff --git a/arch/sparc/include/asm/atomic_64.h b/arch/sparc/include/asm/atomic_64.h
9848index 4082749..fd97781 100644
9849--- a/arch/sparc/include/asm/atomic_64.h
9850+++ b/arch/sparc/include/asm/atomic_64.h
9851@@ -15,18 +15,38 @@
9852 #define ATOMIC64_INIT(i) { (i) }
9853
9854 #define atomic_read(v) ACCESS_ONCE((v)->counter)
9855+static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
9856+{
9857+ return ACCESS_ONCE(v->counter);
9858+}
9859 #define atomic64_read(v) ACCESS_ONCE((v)->counter)
9860+static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
9861+{
9862+ return ACCESS_ONCE(v->counter);
9863+}
9864
9865 #define atomic_set(v, i) (((v)->counter) = i)
9866+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
9867+{
9868+ v->counter = i;
9869+}
9870 #define atomic64_set(v, i) (((v)->counter) = i)
9871+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
9872+{
9873+ v->counter = i;
9874+}
9875
9876-#define ATOMIC_OP(op) \
9877-void atomic_##op(int, atomic_t *); \
9878-void atomic64_##op(long, atomic64_t *);
9879+#define __ATOMIC_OP(op, suffix) \
9880+void atomic_##op##suffix(int, atomic##suffix##_t *); \
9881+void atomic64_##op##suffix(long, atomic64##suffix##_t *);
9882
9883-#define ATOMIC_OP_RETURN(op) \
9884-int atomic_##op##_return(int, atomic_t *); \
9885-long atomic64_##op##_return(long, atomic64_t *);
9886+#define ATOMIC_OP(op) __ATOMIC_OP(op, ) __ATOMIC_OP(op, _unchecked)
9887+
9888+#define __ATOMIC_OP_RETURN(op, suffix) \
9889+int atomic_##op##_return##suffix(int, atomic##suffix##_t *); \
9890+long atomic64_##op##_return##suffix(long, atomic64##suffix##_t *);
9891+
9892+#define ATOMIC_OP_RETURN(op) __ATOMIC_OP_RETURN(op, ) __ATOMIC_OP_RETURN(op, _unchecked)
9893
9894 #define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_OP_RETURN(op)
9895
9896@@ -35,13 +55,23 @@ ATOMIC_OPS(sub)
9897
9898 #undef ATOMIC_OPS
9899 #undef ATOMIC_OP_RETURN
9900+#undef __ATOMIC_OP_RETURN
9901 #undef ATOMIC_OP
9902+#undef __ATOMIC_OP
9903
9904 #define atomic_dec_return(v) atomic_sub_return(1, v)
9905 #define atomic64_dec_return(v) atomic64_sub_return(1, v)
9906
9907 #define atomic_inc_return(v) atomic_add_return(1, v)
9908+static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
9909+{
9910+ return atomic_add_return_unchecked(1, v);
9911+}
9912 #define atomic64_inc_return(v) atomic64_add_return(1, v)
9913+static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
9914+{
9915+ return atomic64_add_return_unchecked(1, v);
9916+}
9917
9918 /*
9919 * atomic_inc_and_test - increment and test
9920@@ -52,6 +82,10 @@ ATOMIC_OPS(sub)
9921 * other cases.
9922 */
9923 #define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
9924+static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
9925+{
9926+ return atomic_inc_return_unchecked(v) == 0;
9927+}
9928 #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
9929
9930 #define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0)
9931@@ -61,25 +95,60 @@ ATOMIC_OPS(sub)
9932 #define atomic64_dec_and_test(v) (atomic64_sub_return(1, v) == 0)
9933
9934 #define atomic_inc(v) atomic_add(1, v)
9935+static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
9936+{
9937+ atomic_add_unchecked(1, v);
9938+}
9939 #define atomic64_inc(v) atomic64_add(1, v)
9940+static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
9941+{
9942+ atomic64_add_unchecked(1, v);
9943+}
9944
9945 #define atomic_dec(v) atomic_sub(1, v)
9946+static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
9947+{
9948+ atomic_sub_unchecked(1, v);
9949+}
9950 #define atomic64_dec(v) atomic64_sub(1, v)
9951+static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
9952+{
9953+ atomic64_sub_unchecked(1, v);
9954+}
9955
9956 #define atomic_add_negative(i, v) (atomic_add_return(i, v) < 0)
9957 #define atomic64_add_negative(i, v) (atomic64_add_return(i, v) < 0)
9958
9959 #define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
9960+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
9961+{
9962+ return cmpxchg(&v->counter, old, new);
9963+}
9964 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
9965+static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
9966+{
9967+ return xchg(&v->counter, new);
9968+}
9969
9970 static inline int __atomic_add_unless(atomic_t *v, int a, int u)
9971 {
9972- int c, old;
9973+ int c, old, new;
9974 c = atomic_read(v);
9975 for (;;) {
9976- if (unlikely(c == (u)))
9977+ if (unlikely(c == u))
9978 break;
9979- old = atomic_cmpxchg((v), c, c + (a));
9980+
9981+ asm volatile("addcc %2, %0, %0\n"
9982+
9983+#ifdef CONFIG_PAX_REFCOUNT
9984+ "tvs %%icc, 6\n"
9985+#endif
9986+
9987+ : "=r" (new)
9988+ : "0" (c), "ir" (a)
9989+ : "cc");
9990+
9991+ old = atomic_cmpxchg(v, c, new);
9992 if (likely(old == c))
9993 break;
9994 c = old;
9995@@ -90,20 +159,35 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
9996 #define atomic64_cmpxchg(v, o, n) \
9997 ((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n)))
9998 #define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
9999+static inline long atomic64_xchg_unchecked(atomic64_unchecked_t *v, long new)
10000+{
10001+ return xchg(&v->counter, new);
10002+}
10003
10004 static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
10005 {
10006- long c, old;
10007+ long c, old, new;
10008 c = atomic64_read(v);
10009 for (;;) {
10010- if (unlikely(c == (u)))
10011+ if (unlikely(c == u))
10012 break;
10013- old = atomic64_cmpxchg((v), c, c + (a));
10014+
10015+ asm volatile("addcc %2, %0, %0\n"
10016+
10017+#ifdef CONFIG_PAX_REFCOUNT
10018+ "tvs %%xcc, 6\n"
10019+#endif
10020+
10021+ : "=r" (new)
10022+ : "0" (c), "ir" (a)
10023+ : "cc");
10024+
10025+ old = atomic64_cmpxchg(v, c, new);
10026 if (likely(old == c))
10027 break;
10028 c = old;
10029 }
10030- return c != (u);
10031+ return c != u;
10032 }
10033
10034 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
10035diff --git a/arch/sparc/include/asm/barrier_64.h b/arch/sparc/include/asm/barrier_64.h
10036index 7664894..45a974b 100644
10037--- a/arch/sparc/include/asm/barrier_64.h
10038+++ b/arch/sparc/include/asm/barrier_64.h
10039@@ -60,7 +60,7 @@ do { __asm__ __volatile__("ba,pt %%xcc, 1f\n\t" \
10040 do { \
10041 compiletime_assert_atomic_type(*p); \
10042 barrier(); \
10043- ACCESS_ONCE(*p) = (v); \
10044+ ACCESS_ONCE_RW(*p) = (v); \
10045 } while (0)
10046
10047 #define smp_load_acquire(p) \
10048diff --git a/arch/sparc/include/asm/cache.h b/arch/sparc/include/asm/cache.h
10049index 5bb6991..5c2132e 100644
10050--- a/arch/sparc/include/asm/cache.h
10051+++ b/arch/sparc/include/asm/cache.h
10052@@ -7,10 +7,12 @@
10053 #ifndef _SPARC_CACHE_H
10054 #define _SPARC_CACHE_H
10055
10056+#include <linux/const.h>
10057+
10058 #define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
10059
10060 #define L1_CACHE_SHIFT 5
10061-#define L1_CACHE_BYTES 32
10062+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
10063
10064 #ifdef CONFIG_SPARC32
10065 #define SMP_CACHE_BYTES_SHIFT 5
10066diff --git a/arch/sparc/include/asm/elf_32.h b/arch/sparc/include/asm/elf_32.h
10067index a24e41f..47677ff 100644
10068--- a/arch/sparc/include/asm/elf_32.h
10069+++ b/arch/sparc/include/asm/elf_32.h
10070@@ -114,6 +114,13 @@ typedef struct {
10071
10072 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE)
10073
10074+#ifdef CONFIG_PAX_ASLR
10075+#define PAX_ELF_ET_DYN_BASE 0x10000UL
10076+
10077+#define PAX_DELTA_MMAP_LEN 16
10078+#define PAX_DELTA_STACK_LEN 16
10079+#endif
10080+
10081 /* This yields a mask that user programs can use to figure out what
10082 instruction set this cpu supports. This can NOT be done in userspace
10083 on Sparc. */
10084diff --git a/arch/sparc/include/asm/elf_64.h b/arch/sparc/include/asm/elf_64.h
10085index 370ca1e..d4f4a98 100644
10086--- a/arch/sparc/include/asm/elf_64.h
10087+++ b/arch/sparc/include/asm/elf_64.h
10088@@ -189,6 +189,13 @@ typedef struct {
10089 #define ELF_ET_DYN_BASE 0x0000010000000000UL
10090 #define COMPAT_ELF_ET_DYN_BASE 0x0000000070000000UL
10091
10092+#ifdef CONFIG_PAX_ASLR
10093+#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT) ? 0x10000UL : 0x100000UL)
10094+
10095+#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT) ? 14 : 28)
10096+#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT) ? 15 : 29)
10097+#endif
10098+
10099 extern unsigned long sparc64_elf_hwcap;
10100 #define ELF_HWCAP sparc64_elf_hwcap
10101
10102diff --git a/arch/sparc/include/asm/pgalloc_32.h b/arch/sparc/include/asm/pgalloc_32.h
10103index a3890da..f6a408e 100644
10104--- a/arch/sparc/include/asm/pgalloc_32.h
10105+++ b/arch/sparc/include/asm/pgalloc_32.h
10106@@ -35,6 +35,7 @@ static inline void pgd_set(pgd_t * pgdp, pmd_t * pmdp)
10107 }
10108
10109 #define pgd_populate(MM, PGD, PMD) pgd_set(PGD, PMD)
10110+#define pgd_populate_kernel(MM, PGD, PMD) pgd_populate((MM), (PGD), (PMD))
10111
10112 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm,
10113 unsigned long address)
10114diff --git a/arch/sparc/include/asm/pgalloc_64.h b/arch/sparc/include/asm/pgalloc_64.h
10115index 5e31871..13469c6 100644
10116--- a/arch/sparc/include/asm/pgalloc_64.h
10117+++ b/arch/sparc/include/asm/pgalloc_64.h
10118@@ -21,6 +21,7 @@ static inline void __pgd_populate(pgd_t *pgd, pud_t *pud)
10119 }
10120
10121 #define pgd_populate(MM, PGD, PUD) __pgd_populate(PGD, PUD)
10122+#define pgd_populate_kernel(MM, PGD, PMD) pgd_populate((MM), (PGD), (PMD))
10123
10124 static inline pgd_t *pgd_alloc(struct mm_struct *mm)
10125 {
10126@@ -38,6 +39,7 @@ static inline void __pud_populate(pud_t *pud, pmd_t *pmd)
10127 }
10128
10129 #define pud_populate(MM, PUD, PMD) __pud_populate(PUD, PMD)
10130+#define pud_populate_kernel(MM, PUD, PMD) pud_populate((MM), (PUD), (PMD))
10131
10132 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
10133 {
10134diff --git a/arch/sparc/include/asm/pgtable.h b/arch/sparc/include/asm/pgtable.h
10135index 59ba6f6..4518128 100644
10136--- a/arch/sparc/include/asm/pgtable.h
10137+++ b/arch/sparc/include/asm/pgtable.h
10138@@ -5,4 +5,8 @@
10139 #else
10140 #include <asm/pgtable_32.h>
10141 #endif
10142+
10143+#define ktla_ktva(addr) (addr)
10144+#define ktva_ktla(addr) (addr)
10145+
10146 #endif
10147diff --git a/arch/sparc/include/asm/pgtable_32.h b/arch/sparc/include/asm/pgtable_32.h
10148index f06b36a..bca3189 100644
10149--- a/arch/sparc/include/asm/pgtable_32.h
10150+++ b/arch/sparc/include/asm/pgtable_32.h
10151@@ -51,6 +51,9 @@ unsigned long __init bootmem_init(unsigned long *pages_avail);
10152 #define PAGE_SHARED SRMMU_PAGE_SHARED
10153 #define PAGE_COPY SRMMU_PAGE_COPY
10154 #define PAGE_READONLY SRMMU_PAGE_RDONLY
10155+#define PAGE_SHARED_NOEXEC SRMMU_PAGE_SHARED_NOEXEC
10156+#define PAGE_COPY_NOEXEC SRMMU_PAGE_COPY_NOEXEC
10157+#define PAGE_READONLY_NOEXEC SRMMU_PAGE_RDONLY_NOEXEC
10158 #define PAGE_KERNEL SRMMU_PAGE_KERNEL
10159
10160 /* Top-level page directory - dummy used by init-mm.
10161@@ -63,18 +66,18 @@ extern unsigned long ptr_in_current_pgd;
10162
10163 /* xwr */
10164 #define __P000 PAGE_NONE
10165-#define __P001 PAGE_READONLY
10166-#define __P010 PAGE_COPY
10167-#define __P011 PAGE_COPY
10168+#define __P001 PAGE_READONLY_NOEXEC
10169+#define __P010 PAGE_COPY_NOEXEC
10170+#define __P011 PAGE_COPY_NOEXEC
10171 #define __P100 PAGE_READONLY
10172 #define __P101 PAGE_READONLY
10173 #define __P110 PAGE_COPY
10174 #define __P111 PAGE_COPY
10175
10176 #define __S000 PAGE_NONE
10177-#define __S001 PAGE_READONLY
10178-#define __S010 PAGE_SHARED
10179-#define __S011 PAGE_SHARED
10180+#define __S001 PAGE_READONLY_NOEXEC
10181+#define __S010 PAGE_SHARED_NOEXEC
10182+#define __S011 PAGE_SHARED_NOEXEC
10183 #define __S100 PAGE_READONLY
10184 #define __S101 PAGE_READONLY
10185 #define __S110 PAGE_SHARED
10186diff --git a/arch/sparc/include/asm/pgtsrmmu.h b/arch/sparc/include/asm/pgtsrmmu.h
10187index ae51a11..eadfd03 100644
10188--- a/arch/sparc/include/asm/pgtsrmmu.h
10189+++ b/arch/sparc/include/asm/pgtsrmmu.h
10190@@ -111,6 +111,11 @@
10191 SRMMU_EXEC | SRMMU_REF)
10192 #define SRMMU_PAGE_RDONLY __pgprot(SRMMU_VALID | SRMMU_CACHE | \
10193 SRMMU_EXEC | SRMMU_REF)
10194+
10195+#define SRMMU_PAGE_SHARED_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_WRITE | SRMMU_REF)
10196+#define SRMMU_PAGE_COPY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
10197+#define SRMMU_PAGE_RDONLY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
10198+
10199 #define SRMMU_PAGE_KERNEL __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_PRIV | \
10200 SRMMU_DIRTY | SRMMU_REF)
10201
10202diff --git a/arch/sparc/include/asm/setup.h b/arch/sparc/include/asm/setup.h
10203index 29d64b1..4272fe8 100644
10204--- a/arch/sparc/include/asm/setup.h
10205+++ b/arch/sparc/include/asm/setup.h
10206@@ -55,8 +55,8 @@ int handle_ldf_stq(u32 insn, struct pt_regs *regs);
10207 void handle_ld_nf(u32 insn, struct pt_regs *regs);
10208
10209 /* init_64.c */
10210-extern atomic_t dcpage_flushes;
10211-extern atomic_t dcpage_flushes_xcall;
10212+extern atomic_unchecked_t dcpage_flushes;
10213+extern atomic_unchecked_t dcpage_flushes_xcall;
10214
10215 extern int sysctl_tsb_ratio;
10216 #endif
10217diff --git a/arch/sparc/include/asm/spinlock_64.h b/arch/sparc/include/asm/spinlock_64.h
10218index 9689176..63c18ea 100644
10219--- a/arch/sparc/include/asm/spinlock_64.h
10220+++ b/arch/sparc/include/asm/spinlock_64.h
10221@@ -92,14 +92,19 @@ static inline void arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long fla
10222
10223 /* Multi-reader locks, these are much saner than the 32-bit Sparc ones... */
10224
10225-static void inline arch_read_lock(arch_rwlock_t *lock)
10226+static inline void arch_read_lock(arch_rwlock_t *lock)
10227 {
10228 unsigned long tmp1, tmp2;
10229
10230 __asm__ __volatile__ (
10231 "1: ldsw [%2], %0\n"
10232 " brlz,pn %0, 2f\n"
10233-"4: add %0, 1, %1\n"
10234+"4: addcc %0, 1, %1\n"
10235+
10236+#ifdef CONFIG_PAX_REFCOUNT
10237+" tvs %%icc, 6\n"
10238+#endif
10239+
10240 " cas [%2], %0, %1\n"
10241 " cmp %0, %1\n"
10242 " bne,pn %%icc, 1b\n"
10243@@ -112,10 +117,10 @@ static void inline arch_read_lock(arch_rwlock_t *lock)
10244 " .previous"
10245 : "=&r" (tmp1), "=&r" (tmp2)
10246 : "r" (lock)
10247- : "memory");
10248+ : "memory", "cc");
10249 }
10250
10251-static int inline arch_read_trylock(arch_rwlock_t *lock)
10252+static inline int arch_read_trylock(arch_rwlock_t *lock)
10253 {
10254 int tmp1, tmp2;
10255
10256@@ -123,7 +128,12 @@ static int inline arch_read_trylock(arch_rwlock_t *lock)
10257 "1: ldsw [%2], %0\n"
10258 " brlz,a,pn %0, 2f\n"
10259 " mov 0, %0\n"
10260-" add %0, 1, %1\n"
10261+" addcc %0, 1, %1\n"
10262+
10263+#ifdef CONFIG_PAX_REFCOUNT
10264+" tvs %%icc, 6\n"
10265+#endif
10266+
10267 " cas [%2], %0, %1\n"
10268 " cmp %0, %1\n"
10269 " bne,pn %%icc, 1b\n"
10270@@ -136,13 +146,18 @@ static int inline arch_read_trylock(arch_rwlock_t *lock)
10271 return tmp1;
10272 }
10273
10274-static void inline arch_read_unlock(arch_rwlock_t *lock)
10275+static inline void arch_read_unlock(arch_rwlock_t *lock)
10276 {
10277 unsigned long tmp1, tmp2;
10278
10279 __asm__ __volatile__(
10280 "1: lduw [%2], %0\n"
10281-" sub %0, 1, %1\n"
10282+" subcc %0, 1, %1\n"
10283+
10284+#ifdef CONFIG_PAX_REFCOUNT
10285+" tvs %%icc, 6\n"
10286+#endif
10287+
10288 " cas [%2], %0, %1\n"
10289 " cmp %0, %1\n"
10290 " bne,pn %%xcc, 1b\n"
10291@@ -152,7 +167,7 @@ static void inline arch_read_unlock(arch_rwlock_t *lock)
10292 : "memory");
10293 }
10294
10295-static void inline arch_write_lock(arch_rwlock_t *lock)
10296+static inline void arch_write_lock(arch_rwlock_t *lock)
10297 {
10298 unsigned long mask, tmp1, tmp2;
10299
10300@@ -177,7 +192,7 @@ static void inline arch_write_lock(arch_rwlock_t *lock)
10301 : "memory");
10302 }
10303
10304-static void inline arch_write_unlock(arch_rwlock_t *lock)
10305+static inline void arch_write_unlock(arch_rwlock_t *lock)
10306 {
10307 __asm__ __volatile__(
10308 " stw %%g0, [%0]"
10309@@ -186,7 +201,7 @@ static void inline arch_write_unlock(arch_rwlock_t *lock)
10310 : "memory");
10311 }
10312
10313-static int inline arch_write_trylock(arch_rwlock_t *lock)
10314+static inline int arch_write_trylock(arch_rwlock_t *lock)
10315 {
10316 unsigned long mask, tmp1, tmp2, result;
10317
10318diff --git a/arch/sparc/include/asm/thread_info_32.h b/arch/sparc/include/asm/thread_info_32.h
10319index fd7bd0a..2e2fa7a 100644
10320--- a/arch/sparc/include/asm/thread_info_32.h
10321+++ b/arch/sparc/include/asm/thread_info_32.h
10322@@ -47,6 +47,7 @@ struct thread_info {
10323 struct reg_window32 reg_window[NSWINS]; /* align for ldd! */
10324 unsigned long rwbuf_stkptrs[NSWINS];
10325 unsigned long w_saved;
10326+ unsigned long lowest_stack;
10327 };
10328
10329 /*
10330diff --git a/arch/sparc/include/asm/thread_info_64.h b/arch/sparc/include/asm/thread_info_64.h
10331index ff45516..73001ab 100644
10332--- a/arch/sparc/include/asm/thread_info_64.h
10333+++ b/arch/sparc/include/asm/thread_info_64.h
10334@@ -61,6 +61,8 @@ struct thread_info {
10335 struct pt_regs *kern_una_regs;
10336 unsigned int kern_una_insn;
10337
10338+ unsigned long lowest_stack;
10339+
10340 unsigned long fpregs[(7 * 256) / sizeof(unsigned long)]
10341 __attribute__ ((aligned(64)));
10342 };
10343@@ -184,12 +186,13 @@ register struct thread_info *current_thread_info_reg asm("g6");
10344 #define TIF_NEED_RESCHED 3 /* rescheduling necessary */
10345 /* flag bit 4 is available */
10346 #define TIF_UNALIGNED 5 /* allowed to do unaligned accesses */
10347-/* flag bit 6 is available */
10348+#define TIF_GRSEC_SETXID 6 /* update credentials on syscall entry/exit */
10349 #define TIF_32BIT 7 /* 32-bit binary */
10350 #define TIF_NOHZ 8 /* in adaptive nohz mode */
10351 #define TIF_SECCOMP 9 /* secure computing */
10352 #define TIF_SYSCALL_AUDIT 10 /* syscall auditing active */
10353 #define TIF_SYSCALL_TRACEPOINT 11 /* syscall tracepoint instrumentation */
10354+
10355 /* NOTE: Thread flags >= 12 should be ones we have no interest
10356 * in using in assembly, else we can't use the mask as
10357 * an immediate value in instructions such as andcc.
10358@@ -209,12 +212,17 @@ register struct thread_info *current_thread_info_reg asm("g6");
10359 #define _TIF_SYSCALL_AUDIT (1<<TIF_SYSCALL_AUDIT)
10360 #define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT)
10361 #define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG)
10362+#define _TIF_GRSEC_SETXID (1<<TIF_GRSEC_SETXID)
10363
10364 #define _TIF_USER_WORK_MASK ((0xff << TI_FLAG_WSAVED_SHIFT) | \
10365 _TIF_DO_NOTIFY_RESUME_MASK | \
10366 _TIF_NEED_RESCHED)
10367 #define _TIF_DO_NOTIFY_RESUME_MASK (_TIF_NOTIFY_RESUME | _TIF_SIGPENDING)
10368
10369+#define _TIF_WORK_SYSCALL \
10370+ (_TIF_SYSCALL_TRACE | _TIF_SECCOMP | _TIF_SYSCALL_AUDIT | \
10371+ _TIF_SYSCALL_TRACEPOINT | _TIF_NOHZ | _TIF_GRSEC_SETXID)
10372+
10373 #define is_32bit_task() (test_thread_flag(TIF_32BIT))
10374
10375 /*
10376diff --git a/arch/sparc/include/asm/uaccess.h b/arch/sparc/include/asm/uaccess.h
10377index bd56c28..4b63d83 100644
10378--- a/arch/sparc/include/asm/uaccess.h
10379+++ b/arch/sparc/include/asm/uaccess.h
10380@@ -1,5 +1,6 @@
10381 #ifndef ___ASM_SPARC_UACCESS_H
10382 #define ___ASM_SPARC_UACCESS_H
10383+
10384 #if defined(__sparc__) && defined(__arch64__)
10385 #include <asm/uaccess_64.h>
10386 #else
10387diff --git a/arch/sparc/include/asm/uaccess_32.h b/arch/sparc/include/asm/uaccess_32.h
10388index 64ee103..388aef0 100644
10389--- a/arch/sparc/include/asm/uaccess_32.h
10390+++ b/arch/sparc/include/asm/uaccess_32.h
10391@@ -47,6 +47,7 @@
10392 #define __user_ok(addr, size) ({ (void)(size); (addr) < STACK_TOP; })
10393 #define __kernel_ok (segment_eq(get_fs(), KERNEL_DS))
10394 #define __access_ok(addr, size) (__user_ok((addr) & get_fs().seg, (size)))
10395+#define access_ok_noprefault(type, addr, size) access_ok((type), (addr), (size))
10396 #define access_ok(type, addr, size) \
10397 ({ (void)(type); __access_ok((unsigned long)(addr), size); })
10398
10399@@ -313,27 +314,46 @@ unsigned long __copy_user(void __user *to, const void __user *from, unsigned lon
10400
10401 static inline unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
10402 {
10403- if (n && __access_ok((unsigned long) to, n))
10404+ if ((long)n < 0)
10405+ return n;
10406+
10407+ if (n && __access_ok((unsigned long) to, n)) {
10408+ if (!__builtin_constant_p(n))
10409+ check_object_size(from, n, true);
10410 return __copy_user(to, (__force void __user *) from, n);
10411- else
10412+ } else
10413 return n;
10414 }
10415
10416 static inline unsigned long __copy_to_user(void __user *to, const void *from, unsigned long n)
10417 {
10418+ if ((long)n < 0)
10419+ return n;
10420+
10421+ if (!__builtin_constant_p(n))
10422+ check_object_size(from, n, true);
10423+
10424 return __copy_user(to, (__force void __user *) from, n);
10425 }
10426
10427 static inline unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
10428 {
10429- if (n && __access_ok((unsigned long) from, n))
10430+ if ((long)n < 0)
10431+ return n;
10432+
10433+ if (n && __access_ok((unsigned long) from, n)) {
10434+ if (!__builtin_constant_p(n))
10435+ check_object_size(to, n, false);
10436 return __copy_user((__force void __user *) to, from, n);
10437- else
10438+ } else
10439 return n;
10440 }
10441
10442 static inline unsigned long __copy_from_user(void *to, const void __user *from, unsigned long n)
10443 {
10444+ if ((long)n < 0)
10445+ return n;
10446+
10447 return __copy_user((__force void __user *) to, from, n);
10448 }
10449
10450diff --git a/arch/sparc/include/asm/uaccess_64.h b/arch/sparc/include/asm/uaccess_64.h
10451index a35194b..47dabc0d 100644
10452--- a/arch/sparc/include/asm/uaccess_64.h
10453+++ b/arch/sparc/include/asm/uaccess_64.h
10454@@ -10,6 +10,7 @@
10455 #include <linux/compiler.h>
10456 #include <linux/string.h>
10457 #include <linux/thread_info.h>
10458+#include <linux/kernel.h>
10459 #include <asm/asi.h>
10460 #include <asm/spitfire.h>
10461 #include <asm-generic/uaccess-unaligned.h>
10462@@ -54,6 +55,11 @@ static inline int __access_ok(const void __user * addr, unsigned long size)
10463 return 1;
10464 }
10465
10466+static inline int access_ok_noprefault(int type, const void __user * addr, unsigned long size)
10467+{
10468+ return 1;
10469+}
10470+
10471 static inline int access_ok(int type, const void __user * addr, unsigned long size)
10472 {
10473 return 1;
10474@@ -228,8 +234,15 @@ unsigned long copy_from_user_fixup(void *to, const void __user *from,
10475 static inline unsigned long __must_check
10476 copy_from_user(void *to, const void __user *from, unsigned long size)
10477 {
10478- unsigned long ret = ___copy_from_user(to, from, size);
10479+ unsigned long ret;
10480
10481+ if ((long)size < 0 || size > INT_MAX)
10482+ return size;
10483+
10484+ if (!__builtin_constant_p(size))
10485+ check_object_size(to, size, false);
10486+
10487+ ret = ___copy_from_user(to, from, size);
10488 if (unlikely(ret))
10489 ret = copy_from_user_fixup(to, from, size);
10490
10491@@ -245,8 +258,15 @@ unsigned long copy_to_user_fixup(void __user *to, const void *from,
10492 static inline unsigned long __must_check
10493 copy_to_user(void __user *to, const void *from, unsigned long size)
10494 {
10495- unsigned long ret = ___copy_to_user(to, from, size);
10496+ unsigned long ret;
10497
10498+ if ((long)size < 0 || size > INT_MAX)
10499+ return size;
10500+
10501+ if (!__builtin_constant_p(size))
10502+ check_object_size(from, size, true);
10503+
10504+ ret = ___copy_to_user(to, from, size);
10505 if (unlikely(ret))
10506 ret = copy_to_user_fixup(to, from, size);
10507 return ret;
10508diff --git a/arch/sparc/kernel/Makefile b/arch/sparc/kernel/Makefile
10509index 7cf9c6e..6206648 100644
10510--- a/arch/sparc/kernel/Makefile
10511+++ b/arch/sparc/kernel/Makefile
10512@@ -4,7 +4,7 @@
10513 #
10514
10515 asflags-y := -ansi
10516-ccflags-y := -Werror
10517+#ccflags-y := -Werror
10518
10519 extra-y := head_$(BITS).o
10520
10521diff --git a/arch/sparc/kernel/process_32.c b/arch/sparc/kernel/process_32.c
10522index 50e7b62..79fae35 100644
10523--- a/arch/sparc/kernel/process_32.c
10524+++ b/arch/sparc/kernel/process_32.c
10525@@ -123,14 +123,14 @@ void show_regs(struct pt_regs *r)
10526
10527 printk("PSR: %08lx PC: %08lx NPC: %08lx Y: %08lx %s\n",
10528 r->psr, r->pc, r->npc, r->y, print_tainted());
10529- printk("PC: <%pS>\n", (void *) r->pc);
10530+ printk("PC: <%pA>\n", (void *) r->pc);
10531 printk("%%G: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
10532 r->u_regs[0], r->u_regs[1], r->u_regs[2], r->u_regs[3],
10533 r->u_regs[4], r->u_regs[5], r->u_regs[6], r->u_regs[7]);
10534 printk("%%O: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
10535 r->u_regs[8], r->u_regs[9], r->u_regs[10], r->u_regs[11],
10536 r->u_regs[12], r->u_regs[13], r->u_regs[14], r->u_regs[15]);
10537- printk("RPC: <%pS>\n", (void *) r->u_regs[15]);
10538+ printk("RPC: <%pA>\n", (void *) r->u_regs[15]);
10539
10540 printk("%%L: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
10541 rw->locals[0], rw->locals[1], rw->locals[2], rw->locals[3],
10542@@ -167,7 +167,7 @@ void show_stack(struct task_struct *tsk, unsigned long *_ksp)
10543 rw = (struct reg_window32 *) fp;
10544 pc = rw->ins[7];
10545 printk("[%08lx : ", pc);
10546- printk("%pS ] ", (void *) pc);
10547+ printk("%pA ] ", (void *) pc);
10548 fp = rw->ins[6];
10549 } while (++count < 16);
10550 printk("\n");
10551diff --git a/arch/sparc/kernel/process_64.c b/arch/sparc/kernel/process_64.c
10552index 46a5964..a35c62c 100644
10553--- a/arch/sparc/kernel/process_64.c
10554+++ b/arch/sparc/kernel/process_64.c
10555@@ -161,7 +161,7 @@ static void show_regwindow(struct pt_regs *regs)
10556 printk("i4: %016lx i5: %016lx i6: %016lx i7: %016lx\n",
10557 rwk->ins[4], rwk->ins[5], rwk->ins[6], rwk->ins[7]);
10558 if (regs->tstate & TSTATE_PRIV)
10559- printk("I7: <%pS>\n", (void *) rwk->ins[7]);
10560+ printk("I7: <%pA>\n", (void *) rwk->ins[7]);
10561 }
10562
10563 void show_regs(struct pt_regs *regs)
10564@@ -170,7 +170,7 @@ void show_regs(struct pt_regs *regs)
10565
10566 printk("TSTATE: %016lx TPC: %016lx TNPC: %016lx Y: %08x %s\n", regs->tstate,
10567 regs->tpc, regs->tnpc, regs->y, print_tainted());
10568- printk("TPC: <%pS>\n", (void *) regs->tpc);
10569+ printk("TPC: <%pA>\n", (void *) regs->tpc);
10570 printk("g0: %016lx g1: %016lx g2: %016lx g3: %016lx\n",
10571 regs->u_regs[0], regs->u_regs[1], regs->u_regs[2],
10572 regs->u_regs[3]);
10573@@ -183,7 +183,7 @@ void show_regs(struct pt_regs *regs)
10574 printk("o4: %016lx o5: %016lx sp: %016lx ret_pc: %016lx\n",
10575 regs->u_regs[12], regs->u_regs[13], regs->u_regs[14],
10576 regs->u_regs[15]);
10577- printk("RPC: <%pS>\n", (void *) regs->u_regs[15]);
10578+ printk("RPC: <%pA>\n", (void *) regs->u_regs[15]);
10579 show_regwindow(regs);
10580 show_stack(current, (unsigned long *) regs->u_regs[UREG_FP]);
10581 }
10582@@ -278,7 +278,7 @@ void arch_trigger_all_cpu_backtrace(bool include_self)
10583 ((tp && tp->task) ? tp->task->pid : -1));
10584
10585 if (gp->tstate & TSTATE_PRIV) {
10586- printk(" TPC[%pS] O7[%pS] I7[%pS] RPC[%pS]\n",
10587+ printk(" TPC[%pA] O7[%pA] I7[%pA] RPC[%pA]\n",
10588 (void *) gp->tpc,
10589 (void *) gp->o7,
10590 (void *) gp->i7,
10591diff --git a/arch/sparc/kernel/prom_common.c b/arch/sparc/kernel/prom_common.c
10592index 79cc0d1..ec62734 100644
10593--- a/arch/sparc/kernel/prom_common.c
10594+++ b/arch/sparc/kernel/prom_common.c
10595@@ -144,7 +144,7 @@ static int __init prom_common_nextprop(phandle node, char *prev, char *buf)
10596
10597 unsigned int prom_early_allocated __initdata;
10598
10599-static struct of_pdt_ops prom_sparc_ops __initdata = {
10600+static struct of_pdt_ops prom_sparc_ops __initconst = {
10601 .nextprop = prom_common_nextprop,
10602 .getproplen = prom_getproplen,
10603 .getproperty = prom_getproperty,
10604diff --git a/arch/sparc/kernel/ptrace_64.c b/arch/sparc/kernel/ptrace_64.c
10605index 9ddc492..27a5619 100644
10606--- a/arch/sparc/kernel/ptrace_64.c
10607+++ b/arch/sparc/kernel/ptrace_64.c
10608@@ -1060,6 +1060,10 @@ long arch_ptrace(struct task_struct *child, long request,
10609 return ret;
10610 }
10611
10612+#ifdef CONFIG_GRKERNSEC_SETXID
10613+extern void gr_delayed_cred_worker(void);
10614+#endif
10615+
10616 asmlinkage int syscall_trace_enter(struct pt_regs *regs)
10617 {
10618 int ret = 0;
10619@@ -1070,6 +1074,11 @@ asmlinkage int syscall_trace_enter(struct pt_regs *regs)
10620 if (test_thread_flag(TIF_NOHZ))
10621 user_exit();
10622
10623+#ifdef CONFIG_GRKERNSEC_SETXID
10624+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
10625+ gr_delayed_cred_worker();
10626+#endif
10627+
10628 if (test_thread_flag(TIF_SYSCALL_TRACE))
10629 ret = tracehook_report_syscall_entry(regs);
10630
10631@@ -1088,6 +1097,11 @@ asmlinkage void syscall_trace_leave(struct pt_regs *regs)
10632 if (test_thread_flag(TIF_NOHZ))
10633 user_exit();
10634
10635+#ifdef CONFIG_GRKERNSEC_SETXID
10636+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
10637+ gr_delayed_cred_worker();
10638+#endif
10639+
10640 audit_syscall_exit(regs);
10641
10642 if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
10643diff --git a/arch/sparc/kernel/smp_64.c b/arch/sparc/kernel/smp_64.c
10644index 61139d9..c1a5f28 100644
10645--- a/arch/sparc/kernel/smp_64.c
10646+++ b/arch/sparc/kernel/smp_64.c
10647@@ -887,7 +887,7 @@ void smp_flush_dcache_page_impl(struct page *page, int cpu)
10648 return;
10649
10650 #ifdef CONFIG_DEBUG_DCFLUSH
10651- atomic_inc(&dcpage_flushes);
10652+ atomic_inc_unchecked(&dcpage_flushes);
10653 #endif
10654
10655 this_cpu = get_cpu();
10656@@ -911,7 +911,7 @@ void smp_flush_dcache_page_impl(struct page *page, int cpu)
10657 xcall_deliver(data0, __pa(pg_addr),
10658 (u64) pg_addr, cpumask_of(cpu));
10659 #ifdef CONFIG_DEBUG_DCFLUSH
10660- atomic_inc(&dcpage_flushes_xcall);
10661+ atomic_inc_unchecked(&dcpage_flushes_xcall);
10662 #endif
10663 }
10664 }
10665@@ -930,7 +930,7 @@ void flush_dcache_page_all(struct mm_struct *mm, struct page *page)
10666 preempt_disable();
10667
10668 #ifdef CONFIG_DEBUG_DCFLUSH
10669- atomic_inc(&dcpage_flushes);
10670+ atomic_inc_unchecked(&dcpage_flushes);
10671 #endif
10672 data0 = 0;
10673 pg_addr = page_address(page);
10674@@ -947,7 +947,7 @@ void flush_dcache_page_all(struct mm_struct *mm, struct page *page)
10675 xcall_deliver(data0, __pa(pg_addr),
10676 (u64) pg_addr, cpu_online_mask);
10677 #ifdef CONFIG_DEBUG_DCFLUSH
10678- atomic_inc(&dcpage_flushes_xcall);
10679+ atomic_inc_unchecked(&dcpage_flushes_xcall);
10680 #endif
10681 }
10682 __local_flush_dcache_page(page);
10683diff --git a/arch/sparc/kernel/sys_sparc_32.c b/arch/sparc/kernel/sys_sparc_32.c
10684index 646988d..b88905f 100644
10685--- a/arch/sparc/kernel/sys_sparc_32.c
10686+++ b/arch/sparc/kernel/sys_sparc_32.c
10687@@ -54,7 +54,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
10688 if (len > TASK_SIZE - PAGE_SIZE)
10689 return -ENOMEM;
10690 if (!addr)
10691- addr = TASK_UNMAPPED_BASE;
10692+ addr = current->mm->mmap_base;
10693
10694 info.flags = 0;
10695 info.length = len;
10696diff --git a/arch/sparc/kernel/sys_sparc_64.c b/arch/sparc/kernel/sys_sparc_64.c
10697index 30e7ddb..266a3b0 100644
10698--- a/arch/sparc/kernel/sys_sparc_64.c
10699+++ b/arch/sparc/kernel/sys_sparc_64.c
10700@@ -89,13 +89,14 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
10701 struct vm_area_struct * vma;
10702 unsigned long task_size = TASK_SIZE;
10703 int do_color_align;
10704+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
10705 struct vm_unmapped_area_info info;
10706
10707 if (flags & MAP_FIXED) {
10708 /* We do not accept a shared mapping if it would violate
10709 * cache aliasing constraints.
10710 */
10711- if ((flags & MAP_SHARED) &&
10712+ if ((filp || (flags & MAP_SHARED)) &&
10713 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
10714 return -EINVAL;
10715 return addr;
10716@@ -110,6 +111,10 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
10717 if (filp || (flags & MAP_SHARED))
10718 do_color_align = 1;
10719
10720+#ifdef CONFIG_PAX_RANDMMAP
10721+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
10722+#endif
10723+
10724 if (addr) {
10725 if (do_color_align)
10726 addr = COLOR_ALIGN(addr, pgoff);
10727@@ -117,22 +122,28 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
10728 addr = PAGE_ALIGN(addr);
10729
10730 vma = find_vma(mm, addr);
10731- if (task_size - len >= addr &&
10732- (!vma || addr + len <= vma->vm_start))
10733+ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
10734 return addr;
10735 }
10736
10737 info.flags = 0;
10738 info.length = len;
10739- info.low_limit = TASK_UNMAPPED_BASE;
10740+ info.low_limit = mm->mmap_base;
10741 info.high_limit = min(task_size, VA_EXCLUDE_START);
10742 info.align_mask = do_color_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
10743 info.align_offset = pgoff << PAGE_SHIFT;
10744+ info.threadstack_offset = offset;
10745 addr = vm_unmapped_area(&info);
10746
10747 if ((addr & ~PAGE_MASK) && task_size > VA_EXCLUDE_END) {
10748 VM_BUG_ON(addr != -ENOMEM);
10749 info.low_limit = VA_EXCLUDE_END;
10750+
10751+#ifdef CONFIG_PAX_RANDMMAP
10752+ if (mm->pax_flags & MF_PAX_RANDMMAP)
10753+ info.low_limit += mm->delta_mmap;
10754+#endif
10755+
10756 info.high_limit = task_size;
10757 addr = vm_unmapped_area(&info);
10758 }
10759@@ -150,6 +161,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
10760 unsigned long task_size = STACK_TOP32;
10761 unsigned long addr = addr0;
10762 int do_color_align;
10763+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
10764 struct vm_unmapped_area_info info;
10765
10766 /* This should only ever run for 32-bit processes. */
10767@@ -159,7 +171,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
10768 /* We do not accept a shared mapping if it would violate
10769 * cache aliasing constraints.
10770 */
10771- if ((flags & MAP_SHARED) &&
10772+ if ((filp || (flags & MAP_SHARED)) &&
10773 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
10774 return -EINVAL;
10775 return addr;
10776@@ -172,6 +184,10 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
10777 if (filp || (flags & MAP_SHARED))
10778 do_color_align = 1;
10779
10780+#ifdef CONFIG_PAX_RANDMMAP
10781+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
10782+#endif
10783+
10784 /* requesting a specific address */
10785 if (addr) {
10786 if (do_color_align)
10787@@ -180,8 +196,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
10788 addr = PAGE_ALIGN(addr);
10789
10790 vma = find_vma(mm, addr);
10791- if (task_size - len >= addr &&
10792- (!vma || addr + len <= vma->vm_start))
10793+ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
10794 return addr;
10795 }
10796
10797@@ -191,6 +206,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
10798 info.high_limit = mm->mmap_base;
10799 info.align_mask = do_color_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
10800 info.align_offset = pgoff << PAGE_SHIFT;
10801+ info.threadstack_offset = offset;
10802 addr = vm_unmapped_area(&info);
10803
10804 /*
10805@@ -203,6 +219,12 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
10806 VM_BUG_ON(addr != -ENOMEM);
10807 info.flags = 0;
10808 info.low_limit = TASK_UNMAPPED_BASE;
10809+
10810+#ifdef CONFIG_PAX_RANDMMAP
10811+ if (mm->pax_flags & MF_PAX_RANDMMAP)
10812+ info.low_limit += mm->delta_mmap;
10813+#endif
10814+
10815 info.high_limit = STACK_TOP32;
10816 addr = vm_unmapped_area(&info);
10817 }
10818@@ -259,10 +281,14 @@ unsigned long get_fb_unmapped_area(struct file *filp, unsigned long orig_addr, u
10819 EXPORT_SYMBOL(get_fb_unmapped_area);
10820
10821 /* Essentially the same as PowerPC. */
10822-static unsigned long mmap_rnd(void)
10823+static unsigned long mmap_rnd(struct mm_struct *mm)
10824 {
10825 unsigned long rnd = 0UL;
10826
10827+#ifdef CONFIG_PAX_RANDMMAP
10828+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
10829+#endif
10830+
10831 if (current->flags & PF_RANDOMIZE) {
10832 unsigned long val = get_random_int();
10833 if (test_thread_flag(TIF_32BIT))
10834@@ -275,7 +301,7 @@ static unsigned long mmap_rnd(void)
10835
10836 void arch_pick_mmap_layout(struct mm_struct *mm)
10837 {
10838- unsigned long random_factor = mmap_rnd();
10839+ unsigned long random_factor = mmap_rnd(mm);
10840 unsigned long gap;
10841
10842 /*
10843@@ -288,6 +314,12 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
10844 gap == RLIM_INFINITY ||
10845 sysctl_legacy_va_layout) {
10846 mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
10847+
10848+#ifdef CONFIG_PAX_RANDMMAP
10849+ if (mm->pax_flags & MF_PAX_RANDMMAP)
10850+ mm->mmap_base += mm->delta_mmap;
10851+#endif
10852+
10853 mm->get_unmapped_area = arch_get_unmapped_area;
10854 } else {
10855 /* We know it's 32-bit */
10856@@ -299,6 +331,12 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
10857 gap = (task_size / 6 * 5);
10858
10859 mm->mmap_base = PAGE_ALIGN(task_size - gap - random_factor);
10860+
10861+#ifdef CONFIG_PAX_RANDMMAP
10862+ if (mm->pax_flags & MF_PAX_RANDMMAP)
10863+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
10864+#endif
10865+
10866 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
10867 }
10868 }
10869diff --git a/arch/sparc/kernel/syscalls.S b/arch/sparc/kernel/syscalls.S
10870index bb00089..e0ea580 100644
10871--- a/arch/sparc/kernel/syscalls.S
10872+++ b/arch/sparc/kernel/syscalls.S
10873@@ -62,7 +62,7 @@ sys32_rt_sigreturn:
10874 #endif
10875 .align 32
10876 1: ldx [%g6 + TI_FLAGS], %l5
10877- andcc %l5, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT|_TIF_NOHZ), %g0
10878+ andcc %l5, _TIF_WORK_SYSCALL, %g0
10879 be,pt %icc, rtrap
10880 nop
10881 call syscall_trace_leave
10882@@ -194,7 +194,7 @@ linux_sparc_syscall32:
10883
10884 srl %i3, 0, %o3 ! IEU0
10885 srl %i2, 0, %o2 ! IEU0 Group
10886- andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT|_TIF_NOHZ), %g0
10887+ andcc %l0, _TIF_WORK_SYSCALL, %g0
10888 bne,pn %icc, linux_syscall_trace32 ! CTI
10889 mov %i0, %l5 ! IEU1
10890 5: call %l7 ! CTI Group brk forced
10891@@ -218,7 +218,7 @@ linux_sparc_syscall:
10892
10893 mov %i3, %o3 ! IEU1
10894 mov %i4, %o4 ! IEU0 Group
10895- andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT|_TIF_NOHZ), %g0
10896+ andcc %l0, _TIF_WORK_SYSCALL, %g0
10897 bne,pn %icc, linux_syscall_trace ! CTI Group
10898 mov %i0, %l5 ! IEU0
10899 2: call %l7 ! CTI Group brk forced
10900@@ -233,7 +233,7 @@ ret_sys_call:
10901
10902 cmp %o0, -ERESTART_RESTARTBLOCK
10903 bgeu,pn %xcc, 1f
10904- andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT|_TIF_NOHZ), %g0
10905+ andcc %l0, _TIF_WORK_SYSCALL, %g0
10906 ldx [%sp + PTREGS_OFF + PT_V9_TNPC], %l1 ! pc = npc
10907
10908 2:
10909diff --git a/arch/sparc/kernel/traps_32.c b/arch/sparc/kernel/traps_32.c
10910index 6fd386c5..6907d81 100644
10911--- a/arch/sparc/kernel/traps_32.c
10912+++ b/arch/sparc/kernel/traps_32.c
10913@@ -44,6 +44,8 @@ static void instruction_dump(unsigned long *pc)
10914 #define __SAVE __asm__ __volatile__("save %sp, -0x40, %sp\n\t")
10915 #define __RESTORE __asm__ __volatile__("restore %g0, %g0, %g0\n\t")
10916
10917+extern void gr_handle_kernel_exploit(void);
10918+
10919 void __noreturn die_if_kernel(char *str, struct pt_regs *regs)
10920 {
10921 static int die_counter;
10922@@ -76,15 +78,17 @@ void __noreturn die_if_kernel(char *str, struct pt_regs *regs)
10923 count++ < 30 &&
10924 (((unsigned long) rw) >= PAGE_OFFSET) &&
10925 !(((unsigned long) rw) & 0x7)) {
10926- printk("Caller[%08lx]: %pS\n", rw->ins[7],
10927+ printk("Caller[%08lx]: %pA\n", rw->ins[7],
10928 (void *) rw->ins[7]);
10929 rw = (struct reg_window32 *)rw->ins[6];
10930 }
10931 }
10932 printk("Instruction DUMP:");
10933 instruction_dump ((unsigned long *) regs->pc);
10934- if(regs->psr & PSR_PS)
10935+ if(regs->psr & PSR_PS) {
10936+ gr_handle_kernel_exploit();
10937 do_exit(SIGKILL);
10938+ }
10939 do_exit(SIGSEGV);
10940 }
10941
10942diff --git a/arch/sparc/kernel/traps_64.c b/arch/sparc/kernel/traps_64.c
10943index 0e69974..0c15a6e 100644
10944--- a/arch/sparc/kernel/traps_64.c
10945+++ b/arch/sparc/kernel/traps_64.c
10946@@ -79,7 +79,7 @@ static void dump_tl1_traplog(struct tl1_traplog *p)
10947 i + 1,
10948 p->trapstack[i].tstate, p->trapstack[i].tpc,
10949 p->trapstack[i].tnpc, p->trapstack[i].tt);
10950- printk("TRAPLOG: TPC<%pS>\n", (void *) p->trapstack[i].tpc);
10951+ printk("TRAPLOG: TPC<%pA>\n", (void *) p->trapstack[i].tpc);
10952 }
10953 }
10954
10955@@ -99,6 +99,12 @@ void bad_trap(struct pt_regs *regs, long lvl)
10956
10957 lvl -= 0x100;
10958 if (regs->tstate & TSTATE_PRIV) {
10959+
10960+#ifdef CONFIG_PAX_REFCOUNT
10961+ if (lvl == 6)
10962+ pax_report_refcount_overflow(regs);
10963+#endif
10964+
10965 sprintf(buffer, "Kernel bad sw trap %lx", lvl);
10966 die_if_kernel(buffer, regs);
10967 }
10968@@ -117,11 +123,16 @@ void bad_trap(struct pt_regs *regs, long lvl)
10969 void bad_trap_tl1(struct pt_regs *regs, long lvl)
10970 {
10971 char buffer[32];
10972-
10973+
10974 if (notify_die(DIE_TRAP_TL1, "bad trap tl1", regs,
10975 0, lvl, SIGTRAP) == NOTIFY_STOP)
10976 return;
10977
10978+#ifdef CONFIG_PAX_REFCOUNT
10979+ if (lvl == 6)
10980+ pax_report_refcount_overflow(regs);
10981+#endif
10982+
10983 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
10984
10985 sprintf (buffer, "Bad trap %lx at tl>0", lvl);
10986@@ -1151,7 +1162,7 @@ static void cheetah_log_errors(struct pt_regs *regs, struct cheetah_err_info *in
10987 regs->tpc, regs->tnpc, regs->u_regs[UREG_I7], regs->tstate);
10988 printk("%s" "ERROR(%d): ",
10989 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id());
10990- printk("TPC<%pS>\n", (void *) regs->tpc);
10991+ printk("TPC<%pA>\n", (void *) regs->tpc);
10992 printk("%s" "ERROR(%d): M_SYND(%lx), E_SYND(%lx)%s%s\n",
10993 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
10994 (afsr & CHAFSR_M_SYNDROME) >> CHAFSR_M_SYNDROME_SHIFT,
10995@@ -1758,7 +1769,7 @@ void cheetah_plus_parity_error(int type, struct pt_regs *regs)
10996 smp_processor_id(),
10997 (type & 0x1) ? 'I' : 'D',
10998 regs->tpc);
10999- printk(KERN_EMERG "TPC<%pS>\n", (void *) regs->tpc);
11000+ printk(KERN_EMERG "TPC<%pA>\n", (void *) regs->tpc);
11001 panic("Irrecoverable Cheetah+ parity error.");
11002 }
11003
11004@@ -1766,7 +1777,7 @@ void cheetah_plus_parity_error(int type, struct pt_regs *regs)
11005 smp_processor_id(),
11006 (type & 0x1) ? 'I' : 'D',
11007 regs->tpc);
11008- printk(KERN_WARNING "TPC<%pS>\n", (void *) regs->tpc);
11009+ printk(KERN_WARNING "TPC<%pA>\n", (void *) regs->tpc);
11010 }
11011
11012 struct sun4v_error_entry {
11013@@ -1839,8 +1850,8 @@ struct sun4v_error_entry {
11014 /*0x38*/u64 reserved_5;
11015 };
11016
11017-static atomic_t sun4v_resum_oflow_cnt = ATOMIC_INIT(0);
11018-static atomic_t sun4v_nonresum_oflow_cnt = ATOMIC_INIT(0);
11019+static atomic_unchecked_t sun4v_resum_oflow_cnt = ATOMIC_INIT(0);
11020+static atomic_unchecked_t sun4v_nonresum_oflow_cnt = ATOMIC_INIT(0);
11021
11022 static const char *sun4v_err_type_to_str(u8 type)
11023 {
11024@@ -1932,7 +1943,7 @@ static void sun4v_report_real_raddr(const char *pfx, struct pt_regs *regs)
11025 }
11026
11027 static void sun4v_log_error(struct pt_regs *regs, struct sun4v_error_entry *ent,
11028- int cpu, const char *pfx, atomic_t *ocnt)
11029+ int cpu, const char *pfx, atomic_unchecked_t *ocnt)
11030 {
11031 u64 *raw_ptr = (u64 *) ent;
11032 u32 attrs;
11033@@ -1990,8 +2001,8 @@ static void sun4v_log_error(struct pt_regs *regs, struct sun4v_error_entry *ent,
11034
11035 show_regs(regs);
11036
11037- if ((cnt = atomic_read(ocnt)) != 0) {
11038- atomic_set(ocnt, 0);
11039+ if ((cnt = atomic_read_unchecked(ocnt)) != 0) {
11040+ atomic_set_unchecked(ocnt, 0);
11041 wmb();
11042 printk("%s: Queue overflowed %d times.\n",
11043 pfx, cnt);
11044@@ -2048,7 +2059,7 @@ out:
11045 */
11046 void sun4v_resum_overflow(struct pt_regs *regs)
11047 {
11048- atomic_inc(&sun4v_resum_oflow_cnt);
11049+ atomic_inc_unchecked(&sun4v_resum_oflow_cnt);
11050 }
11051
11052 /* We run with %pil set to PIL_NORMAL_MAX and PSTATE_IE enabled in %pstate.
11053@@ -2101,7 +2112,7 @@ void sun4v_nonresum_overflow(struct pt_regs *regs)
11054 /* XXX Actually even this can make not that much sense. Perhaps
11055 * XXX we should just pull the plug and panic directly from here?
11056 */
11057- atomic_inc(&sun4v_nonresum_oflow_cnt);
11058+ atomic_inc_unchecked(&sun4v_nonresum_oflow_cnt);
11059 }
11060
11061 static void sun4v_tlb_error(struct pt_regs *regs)
11062@@ -2120,9 +2131,9 @@ void sun4v_itlb_error_report(struct pt_regs *regs, int tl)
11063
11064 printk(KERN_EMERG "SUN4V-ITLB: Error at TPC[%lx], tl %d\n",
11065 regs->tpc, tl);
11066- printk(KERN_EMERG "SUN4V-ITLB: TPC<%pS>\n", (void *) regs->tpc);
11067+ printk(KERN_EMERG "SUN4V-ITLB: TPC<%pA>\n", (void *) regs->tpc);
11068 printk(KERN_EMERG "SUN4V-ITLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
11069- printk(KERN_EMERG "SUN4V-ITLB: O7<%pS>\n",
11070+ printk(KERN_EMERG "SUN4V-ITLB: O7<%pA>\n",
11071 (void *) regs->u_regs[UREG_I7]);
11072 printk(KERN_EMERG "SUN4V-ITLB: vaddr[%lx] ctx[%lx] "
11073 "pte[%lx] error[%lx]\n",
11074@@ -2143,9 +2154,9 @@ void sun4v_dtlb_error_report(struct pt_regs *regs, int tl)
11075
11076 printk(KERN_EMERG "SUN4V-DTLB: Error at TPC[%lx], tl %d\n",
11077 regs->tpc, tl);
11078- printk(KERN_EMERG "SUN4V-DTLB: TPC<%pS>\n", (void *) regs->tpc);
11079+ printk(KERN_EMERG "SUN4V-DTLB: TPC<%pA>\n", (void *) regs->tpc);
11080 printk(KERN_EMERG "SUN4V-DTLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
11081- printk(KERN_EMERG "SUN4V-DTLB: O7<%pS>\n",
11082+ printk(KERN_EMERG "SUN4V-DTLB: O7<%pA>\n",
11083 (void *) regs->u_regs[UREG_I7]);
11084 printk(KERN_EMERG "SUN4V-DTLB: vaddr[%lx] ctx[%lx] "
11085 "pte[%lx] error[%lx]\n",
11086@@ -2362,13 +2373,13 @@ void show_stack(struct task_struct *tsk, unsigned long *_ksp)
11087 fp = (unsigned long)sf->fp + STACK_BIAS;
11088 }
11089
11090- printk(" [%016lx] %pS\n", pc, (void *) pc);
11091+ printk(" [%016lx] %pA\n", pc, (void *) pc);
11092 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
11093 if ((pc + 8UL) == (unsigned long) &return_to_handler) {
11094 int index = tsk->curr_ret_stack;
11095 if (tsk->ret_stack && index >= graph) {
11096 pc = tsk->ret_stack[index - graph].ret;
11097- printk(" [%016lx] %pS\n", pc, (void *) pc);
11098+ printk(" [%016lx] %pA\n", pc, (void *) pc);
11099 graph++;
11100 }
11101 }
11102@@ -2386,6 +2397,8 @@ static inline struct reg_window *kernel_stack_up(struct reg_window *rw)
11103 return (struct reg_window *) (fp + STACK_BIAS);
11104 }
11105
11106+extern void gr_handle_kernel_exploit(void);
11107+
11108 void __noreturn die_if_kernel(char *str, struct pt_regs *regs)
11109 {
11110 static int die_counter;
11111@@ -2414,7 +2427,7 @@ void __noreturn die_if_kernel(char *str, struct pt_regs *regs)
11112 while (rw &&
11113 count++ < 30 &&
11114 kstack_valid(tp, (unsigned long) rw)) {
11115- printk("Caller[%016lx]: %pS\n", rw->ins[7],
11116+ printk("Caller[%016lx]: %pA\n", rw->ins[7],
11117 (void *) rw->ins[7]);
11118
11119 rw = kernel_stack_up(rw);
11120@@ -2429,8 +2442,10 @@ void __noreturn die_if_kernel(char *str, struct pt_regs *regs)
11121 }
11122 if (panic_on_oops)
11123 panic("Fatal exception");
11124- if (regs->tstate & TSTATE_PRIV)
11125+ if (regs->tstate & TSTATE_PRIV) {
11126+ gr_handle_kernel_exploit();
11127 do_exit(SIGKILL);
11128+ }
11129 do_exit(SIGSEGV);
11130 }
11131 EXPORT_SYMBOL(die_if_kernel);
11132diff --git a/arch/sparc/kernel/unaligned_64.c b/arch/sparc/kernel/unaligned_64.c
11133index 62098a8..547ab2c 100644
11134--- a/arch/sparc/kernel/unaligned_64.c
11135+++ b/arch/sparc/kernel/unaligned_64.c
11136@@ -297,7 +297,7 @@ static void log_unaligned(struct pt_regs *regs)
11137 static DEFINE_RATELIMIT_STATE(ratelimit, 5 * HZ, 5);
11138
11139 if (__ratelimit(&ratelimit)) {
11140- printk("Kernel unaligned access at TPC[%lx] %pS\n",
11141+ printk("Kernel unaligned access at TPC[%lx] %pA\n",
11142 regs->tpc, (void *) regs->tpc);
11143 }
11144 }
11145diff --git a/arch/sparc/lib/Makefile b/arch/sparc/lib/Makefile
11146index 3269b02..64f5231 100644
11147--- a/arch/sparc/lib/Makefile
11148+++ b/arch/sparc/lib/Makefile
11149@@ -2,7 +2,7 @@
11150 #
11151
11152 asflags-y := -ansi -DST_DIV0=0x02
11153-ccflags-y := -Werror
11154+#ccflags-y := -Werror
11155
11156 lib-$(CONFIG_SPARC32) += ashrdi3.o
11157 lib-$(CONFIG_SPARC32) += memcpy.o memset.o
11158diff --git a/arch/sparc/lib/atomic_64.S b/arch/sparc/lib/atomic_64.S
11159index 05dac43..76f8ed4 100644
11160--- a/arch/sparc/lib/atomic_64.S
11161+++ b/arch/sparc/lib/atomic_64.S
11162@@ -15,11 +15,22 @@
11163 * a value and does the barriers.
11164 */
11165
11166-#define ATOMIC_OP(op) \
11167-ENTRY(atomic_##op) /* %o0 = increment, %o1 = atomic_ptr */ \
11168+#ifdef CONFIG_PAX_REFCOUNT
11169+#define __REFCOUNT_OP(op) op##cc
11170+#define __OVERFLOW_IOP tvs %icc, 6;
11171+#define __OVERFLOW_XOP tvs %xcc, 6;
11172+#else
11173+#define __REFCOUNT_OP(op) op
11174+#define __OVERFLOW_IOP
11175+#define __OVERFLOW_XOP
11176+#endif
11177+
11178+#define __ATOMIC_OP(op, suffix, asm_op, post_op) \
11179+ENTRY(atomic_##op##suffix) /* %o0 = increment, %o1 = atomic_ptr */ \
11180 BACKOFF_SETUP(%o2); \
11181 1: lduw [%o1], %g1; \
11182- op %g1, %o0, %g7; \
11183+ asm_op %g1, %o0, %g7; \
11184+ post_op \
11185 cas [%o1], %g1, %g7; \
11186 cmp %g1, %g7; \
11187 bne,pn %icc, BACKOFF_LABEL(2f, 1b); \
11188@@ -29,11 +40,15 @@ ENTRY(atomic_##op) /* %o0 = increment, %o1 = atomic_ptr */ \
11189 2: BACKOFF_SPIN(%o2, %o3, 1b); \
11190 ENDPROC(atomic_##op); \
11191
11192-#define ATOMIC_OP_RETURN(op) \
11193-ENTRY(atomic_##op##_return) /* %o0 = increment, %o1 = atomic_ptr */ \
11194+#define ATOMIC_OP(op) __ATOMIC_OP(op, , op, ) \
11195+ __ATOMIC_OP(op, _unchecked, __REFCOUNT_OP(op), __OVERFLOW_IOP)
11196+
11197+#define __ATOMIC_OP_RETURN(op, suffix, asm_op, post_op) \
11198+ENTRY(atomic_##op##_return##suffix) /* %o0 = increment, %o1 = atomic_ptr */\
11199 BACKOFF_SETUP(%o2); \
11200 1: lduw [%o1], %g1; \
11201- op %g1, %o0, %g7; \
11202+ asm_op %g1, %o0, %g7; \
11203+ post_op \
11204 cas [%o1], %g1, %g7; \
11205 cmp %g1, %g7; \
11206 bne,pn %icc, BACKOFF_LABEL(2f, 1b); \
11207@@ -43,6 +58,9 @@ ENTRY(atomic_##op##_return) /* %o0 = increment, %o1 = atomic_ptr */ \
11208 2: BACKOFF_SPIN(%o2, %o3, 1b); \
11209 ENDPROC(atomic_##op##_return);
11210
11211+#define ATOMIC_OP_RETURN(op) __ATOMIC_OP_RETURN(op, , op, ) \
11212+ __ATOMIC_OP_RETURN(op, _unchecked, __REFCOUNT_OP(op), __OVERFLOW_IOP)
11213+
11214 #define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_OP_RETURN(op)
11215
11216 ATOMIC_OPS(add)
11217@@ -50,13 +68,16 @@ ATOMIC_OPS(sub)
11218
11219 #undef ATOMIC_OPS
11220 #undef ATOMIC_OP_RETURN
11221+#undef __ATOMIC_OP_RETURN
11222 #undef ATOMIC_OP
11223+#undef __ATOMIC_OP
11224
11225-#define ATOMIC64_OP(op) \
11226-ENTRY(atomic64_##op) /* %o0 = increment, %o1 = atomic_ptr */ \
11227+#define __ATOMIC64_OP(op, suffix, asm_op, post_op) \
11228+ENTRY(atomic64_##op##suffix) /* %o0 = increment, %o1 = atomic_ptr */ \
11229 BACKOFF_SETUP(%o2); \
11230 1: ldx [%o1], %g1; \
11231- op %g1, %o0, %g7; \
11232+ asm_op %g1, %o0, %g7; \
11233+ post_op \
11234 casx [%o1], %g1, %g7; \
11235 cmp %g1, %g7; \
11236 bne,pn %xcc, BACKOFF_LABEL(2f, 1b); \
11237@@ -66,11 +87,15 @@ ENTRY(atomic64_##op) /* %o0 = increment, %o1 = atomic_ptr */ \
11238 2: BACKOFF_SPIN(%o2, %o3, 1b); \
11239 ENDPROC(atomic64_##op); \
11240
11241-#define ATOMIC64_OP_RETURN(op) \
11242-ENTRY(atomic64_##op##_return) /* %o0 = increment, %o1 = atomic_ptr */ \
11243+#define ATOMIC64_OP(op) __ATOMIC64_OP(op, , op, ) \
11244+ __ATOMIC64_OP(op, _unchecked, __REFCOUNT_OP(op), __OVERFLOW_XOP)
11245+
11246+#define __ATOMIC64_OP_RETURN(op, suffix, asm_op, post_op) \
11247+ENTRY(atomic64_##op##_return##suffix) /* %o0 = increment, %o1 = atomic_ptr */\
11248 BACKOFF_SETUP(%o2); \
11249 1: ldx [%o1], %g1; \
11250- op %g1, %o0, %g7; \
11251+ asm_op %g1, %o0, %g7; \
11252+ post_op \
11253 casx [%o1], %g1, %g7; \
11254 cmp %g1, %g7; \
11255 bne,pn %xcc, BACKOFF_LABEL(2f, 1b); \
11256@@ -80,6 +105,9 @@ ENTRY(atomic64_##op##_return) /* %o0 = increment, %o1 = atomic_ptr */ \
11257 2: BACKOFF_SPIN(%o2, %o3, 1b); \
11258 ENDPROC(atomic64_##op##_return);
11259
11260+#define ATOMIC64_OP_RETURN(op) __ATOMIC64_OP_RETURN(op, , op, ) \
11261+ __ATOMIC64_OP_RETURN(op, _unchecked, __REFCOUNT_OP(op), __OVERFLOW_XOP)
11262+
11263 #define ATOMIC64_OPS(op) ATOMIC64_OP(op) ATOMIC64_OP_RETURN(op)
11264
11265 ATOMIC64_OPS(add)
11266@@ -87,7 +115,12 @@ ATOMIC64_OPS(sub)
11267
11268 #undef ATOMIC64_OPS
11269 #undef ATOMIC64_OP_RETURN
11270+#undef __ATOMIC64_OP_RETURN
11271 #undef ATOMIC64_OP
11272+#undef __ATOMIC64_OP
11273+#undef __OVERFLOW_XOP
11274+#undef __OVERFLOW_IOP
11275+#undef __REFCOUNT_OP
11276
11277 ENTRY(atomic64_dec_if_positive) /* %o0 = atomic_ptr */
11278 BACKOFF_SETUP(%o2)
11279diff --git a/arch/sparc/lib/ksyms.c b/arch/sparc/lib/ksyms.c
11280index 1d649a9..fbc5bfc 100644
11281--- a/arch/sparc/lib/ksyms.c
11282+++ b/arch/sparc/lib/ksyms.c
11283@@ -101,7 +101,9 @@ EXPORT_SYMBOL(__clear_user);
11284 /* Atomic counter implementation. */
11285 #define ATOMIC_OP(op) \
11286 EXPORT_SYMBOL(atomic_##op); \
11287-EXPORT_SYMBOL(atomic64_##op);
11288+EXPORT_SYMBOL(atomic_##op##_unchecked); \
11289+EXPORT_SYMBOL(atomic64_##op); \
11290+EXPORT_SYMBOL(atomic64_##op##_unchecked);
11291
11292 #define ATOMIC_OP_RETURN(op) \
11293 EXPORT_SYMBOL(atomic_##op##_return); \
11294@@ -110,6 +112,8 @@ EXPORT_SYMBOL(atomic64_##op##_return);
11295 #define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_OP_RETURN(op)
11296
11297 ATOMIC_OPS(add)
11298+EXPORT_SYMBOL(atomic_add_ret_unchecked);
11299+EXPORT_SYMBOL(atomic64_add_ret_unchecked);
11300 ATOMIC_OPS(sub)
11301
11302 #undef ATOMIC_OPS
11303diff --git a/arch/sparc/mm/Makefile b/arch/sparc/mm/Makefile
11304index 30c3ecc..736f015 100644
11305--- a/arch/sparc/mm/Makefile
11306+++ b/arch/sparc/mm/Makefile
11307@@ -2,7 +2,7 @@
11308 #
11309
11310 asflags-y := -ansi
11311-ccflags-y := -Werror
11312+#ccflags-y := -Werror
11313
11314 obj-$(CONFIG_SPARC64) += ultra.o tlb.o tsb.o gup.o
11315 obj-y += fault_$(BITS).o
11316diff --git a/arch/sparc/mm/fault_32.c b/arch/sparc/mm/fault_32.c
11317index 70d8171..274c6c0 100644
11318--- a/arch/sparc/mm/fault_32.c
11319+++ b/arch/sparc/mm/fault_32.c
11320@@ -21,6 +21,9 @@
11321 #include <linux/perf_event.h>
11322 #include <linux/interrupt.h>
11323 #include <linux/kdebug.h>
11324+#include <linux/slab.h>
11325+#include <linux/pagemap.h>
11326+#include <linux/compiler.h>
11327
11328 #include <asm/page.h>
11329 #include <asm/pgtable.h>
11330@@ -156,6 +159,277 @@ static unsigned long compute_si_addr(struct pt_regs *regs, int text_fault)
11331 return safe_compute_effective_address(regs, insn);
11332 }
11333
11334+#ifdef CONFIG_PAX_PAGEEXEC
11335+#ifdef CONFIG_PAX_DLRESOLVE
11336+static void pax_emuplt_close(struct vm_area_struct *vma)
11337+{
11338+ vma->vm_mm->call_dl_resolve = 0UL;
11339+}
11340+
11341+static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
11342+{
11343+ unsigned int *kaddr;
11344+
11345+ vmf->page = alloc_page(GFP_HIGHUSER);
11346+ if (!vmf->page)
11347+ return VM_FAULT_OOM;
11348+
11349+ kaddr = kmap(vmf->page);
11350+ memset(kaddr, 0, PAGE_SIZE);
11351+ kaddr[0] = 0x9DE3BFA8U; /* save */
11352+ flush_dcache_page(vmf->page);
11353+ kunmap(vmf->page);
11354+ return VM_FAULT_MAJOR;
11355+}
11356+
11357+static const struct vm_operations_struct pax_vm_ops = {
11358+ .close = pax_emuplt_close,
11359+ .fault = pax_emuplt_fault
11360+};
11361+
11362+static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
11363+{
11364+ int ret;
11365+
11366+ INIT_LIST_HEAD(&vma->anon_vma_chain);
11367+ vma->vm_mm = current->mm;
11368+ vma->vm_start = addr;
11369+ vma->vm_end = addr + PAGE_SIZE;
11370+ vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
11371+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
11372+ vma->vm_ops = &pax_vm_ops;
11373+
11374+ ret = insert_vm_struct(current->mm, vma);
11375+ if (ret)
11376+ return ret;
11377+
11378+ ++current->mm->total_vm;
11379+ return 0;
11380+}
11381+#endif
11382+
11383+/*
11384+ * PaX: decide what to do with offenders (regs->pc = fault address)
11385+ *
11386+ * returns 1 when task should be killed
11387+ * 2 when patched PLT trampoline was detected
11388+ * 3 when unpatched PLT trampoline was detected
11389+ */
11390+static int pax_handle_fetch_fault(struct pt_regs *regs)
11391+{
11392+
11393+#ifdef CONFIG_PAX_EMUPLT
11394+ int err;
11395+
11396+ do { /* PaX: patched PLT emulation #1 */
11397+ unsigned int sethi1, sethi2, jmpl;
11398+
11399+ err = get_user(sethi1, (unsigned int *)regs->pc);
11400+ err |= get_user(sethi2, (unsigned int *)(regs->pc+4));
11401+ err |= get_user(jmpl, (unsigned int *)(regs->pc+8));
11402+
11403+ if (err)
11404+ break;
11405+
11406+ if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
11407+ (sethi2 & 0xFFC00000U) == 0x03000000U &&
11408+ (jmpl & 0xFFFFE000U) == 0x81C06000U)
11409+ {
11410+ unsigned int addr;
11411+
11412+ regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
11413+ addr = regs->u_regs[UREG_G1];
11414+ addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
11415+ regs->pc = addr;
11416+ regs->npc = addr+4;
11417+ return 2;
11418+ }
11419+ } while (0);
11420+
11421+ do { /* PaX: patched PLT emulation #2 */
11422+ unsigned int ba;
11423+
11424+ err = get_user(ba, (unsigned int *)regs->pc);
11425+
11426+ if (err)
11427+ break;
11428+
11429+ if ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30480000U) {
11430+ unsigned int addr;
11431+
11432+ if ((ba & 0xFFC00000U) == 0x30800000U)
11433+ addr = regs->pc + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
11434+ else
11435+ addr = regs->pc + ((((ba | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2);
11436+ regs->pc = addr;
11437+ regs->npc = addr+4;
11438+ return 2;
11439+ }
11440+ } while (0);
11441+
11442+ do { /* PaX: patched PLT emulation #3 */
11443+ unsigned int sethi, bajmpl, nop;
11444+
11445+ err = get_user(sethi, (unsigned int *)regs->pc);
11446+ err |= get_user(bajmpl, (unsigned int *)(regs->pc+4));
11447+ err |= get_user(nop, (unsigned int *)(regs->pc+8));
11448+
11449+ if (err)
11450+ break;
11451+
11452+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
11453+ ((bajmpl & 0xFFFFE000U) == 0x81C06000U || (bajmpl & 0xFFF80000U) == 0x30480000U) &&
11454+ nop == 0x01000000U)
11455+ {
11456+ unsigned int addr;
11457+
11458+ addr = (sethi & 0x003FFFFFU) << 10;
11459+ regs->u_regs[UREG_G1] = addr;
11460+ if ((bajmpl & 0xFFFFE000U) == 0x81C06000U)
11461+ addr += (((bajmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
11462+ else
11463+ addr = regs->pc + ((((bajmpl | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2);
11464+ regs->pc = addr;
11465+ regs->npc = addr+4;
11466+ return 2;
11467+ }
11468+ } while (0);
11469+
11470+ do { /* PaX: unpatched PLT emulation step 1 */
11471+ unsigned int sethi, ba, nop;
11472+
11473+ err = get_user(sethi, (unsigned int *)regs->pc);
11474+ err |= get_user(ba, (unsigned int *)(regs->pc+4));
11475+ err |= get_user(nop, (unsigned int *)(regs->pc+8));
11476+
11477+ if (err)
11478+ break;
11479+
11480+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
11481+ ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
11482+ nop == 0x01000000U)
11483+ {
11484+ unsigned int addr, save, call;
11485+
11486+ if ((ba & 0xFFC00000U) == 0x30800000U)
11487+ addr = regs->pc + 4 + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
11488+ else
11489+ addr = regs->pc + 4 + ((((ba | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2);
11490+
11491+ err = get_user(save, (unsigned int *)addr);
11492+ err |= get_user(call, (unsigned int *)(addr+4));
11493+ err |= get_user(nop, (unsigned int *)(addr+8));
11494+ if (err)
11495+ break;
11496+
11497+#ifdef CONFIG_PAX_DLRESOLVE
11498+ if (save == 0x9DE3BFA8U &&
11499+ (call & 0xC0000000U) == 0x40000000U &&
11500+ nop == 0x01000000U)
11501+ {
11502+ struct vm_area_struct *vma;
11503+ unsigned long call_dl_resolve;
11504+
11505+ down_read(&current->mm->mmap_sem);
11506+ call_dl_resolve = current->mm->call_dl_resolve;
11507+ up_read(&current->mm->mmap_sem);
11508+ if (likely(call_dl_resolve))
11509+ goto emulate;
11510+
11511+ vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
11512+
11513+ down_write(&current->mm->mmap_sem);
11514+ if (current->mm->call_dl_resolve) {
11515+ call_dl_resolve = current->mm->call_dl_resolve;
11516+ up_write(&current->mm->mmap_sem);
11517+ if (vma)
11518+ kmem_cache_free(vm_area_cachep, vma);
11519+ goto emulate;
11520+ }
11521+
11522+ call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
11523+ if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
11524+ up_write(&current->mm->mmap_sem);
11525+ if (vma)
11526+ kmem_cache_free(vm_area_cachep, vma);
11527+ return 1;
11528+ }
11529+
11530+ if (pax_insert_vma(vma, call_dl_resolve)) {
11531+ up_write(&current->mm->mmap_sem);
11532+ kmem_cache_free(vm_area_cachep, vma);
11533+ return 1;
11534+ }
11535+
11536+ current->mm->call_dl_resolve = call_dl_resolve;
11537+ up_write(&current->mm->mmap_sem);
11538+
11539+emulate:
11540+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
11541+ regs->pc = call_dl_resolve;
11542+ regs->npc = addr+4;
11543+ return 3;
11544+ }
11545+#endif
11546+
11547+ /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
11548+ if ((save & 0xFFC00000U) == 0x05000000U &&
11549+ (call & 0xFFFFE000U) == 0x85C0A000U &&
11550+ nop == 0x01000000U)
11551+ {
11552+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
11553+ regs->u_regs[UREG_G2] = addr + 4;
11554+ addr = (save & 0x003FFFFFU) << 10;
11555+ addr += (((call | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
11556+ regs->pc = addr;
11557+ regs->npc = addr+4;
11558+ return 3;
11559+ }
11560+ }
11561+ } while (0);
11562+
11563+ do { /* PaX: unpatched PLT emulation step 2 */
11564+ unsigned int save, call, nop;
11565+
11566+ err = get_user(save, (unsigned int *)(regs->pc-4));
11567+ err |= get_user(call, (unsigned int *)regs->pc);
11568+ err |= get_user(nop, (unsigned int *)(regs->pc+4));
11569+ if (err)
11570+ break;
11571+
11572+ if (save == 0x9DE3BFA8U &&
11573+ (call & 0xC0000000U) == 0x40000000U &&
11574+ nop == 0x01000000U)
11575+ {
11576+ unsigned int dl_resolve = regs->pc + ((((call | 0xC0000000U) ^ 0x20000000U) + 0x20000000U) << 2);
11577+
11578+ regs->u_regs[UREG_RETPC] = regs->pc;
11579+ regs->pc = dl_resolve;
11580+ regs->npc = dl_resolve+4;
11581+ return 3;
11582+ }
11583+ } while (0);
11584+#endif
11585+
11586+ return 1;
11587+}
11588+
11589+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
11590+{
11591+ unsigned long i;
11592+
11593+ printk(KERN_ERR "PAX: bytes at PC: ");
11594+ for (i = 0; i < 8; i++) {
11595+ unsigned int c;
11596+ if (get_user(c, (unsigned int *)pc+i))
11597+ printk(KERN_CONT "???????? ");
11598+ else
11599+ printk(KERN_CONT "%08x ", c);
11600+ }
11601+ printk("\n");
11602+}
11603+#endif
11604+
11605 static noinline void do_fault_siginfo(int code, int sig, struct pt_regs *regs,
11606 int text_fault)
11607 {
11608@@ -226,6 +500,24 @@ good_area:
11609 if (!(vma->vm_flags & VM_WRITE))
11610 goto bad_area;
11611 } else {
11612+
11613+#ifdef CONFIG_PAX_PAGEEXEC
11614+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && text_fault && !(vma->vm_flags & VM_EXEC)) {
11615+ up_read(&mm->mmap_sem);
11616+ switch (pax_handle_fetch_fault(regs)) {
11617+
11618+#ifdef CONFIG_PAX_EMUPLT
11619+ case 2:
11620+ case 3:
11621+ return;
11622+#endif
11623+
11624+ }
11625+ pax_report_fault(regs, (void *)regs->pc, (void *)regs->u_regs[UREG_FP]);
11626+ do_group_exit(SIGKILL);
11627+ }
11628+#endif
11629+
11630 /* Allow reads even for write-only mappings */
11631 if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
11632 goto bad_area;
11633diff --git a/arch/sparc/mm/fault_64.c b/arch/sparc/mm/fault_64.c
11634index 4798232..f76e3aa 100644
11635--- a/arch/sparc/mm/fault_64.c
11636+++ b/arch/sparc/mm/fault_64.c
11637@@ -22,6 +22,9 @@
11638 #include <linux/kdebug.h>
11639 #include <linux/percpu.h>
11640 #include <linux/context_tracking.h>
11641+#include <linux/slab.h>
11642+#include <linux/pagemap.h>
11643+#include <linux/compiler.h>
11644
11645 #include <asm/page.h>
11646 #include <asm/pgtable.h>
11647@@ -76,7 +79,7 @@ static void __kprobes bad_kernel_pc(struct pt_regs *regs, unsigned long vaddr)
11648 printk(KERN_CRIT "OOPS: Bogus kernel PC [%016lx] in fault handler\n",
11649 regs->tpc);
11650 printk(KERN_CRIT "OOPS: RPC [%016lx]\n", regs->u_regs[15]);
11651- printk("OOPS: RPC <%pS>\n", (void *) regs->u_regs[15]);
11652+ printk("OOPS: RPC <%pA>\n", (void *) regs->u_regs[15]);
11653 printk(KERN_CRIT "OOPS: Fault was to vaddr[%lx]\n", vaddr);
11654 dump_stack();
11655 unhandled_fault(regs->tpc, current, regs);
11656@@ -279,6 +282,466 @@ static void noinline __kprobes bogus_32bit_fault_tpc(struct pt_regs *regs)
11657 show_regs(regs);
11658 }
11659
11660+#ifdef CONFIG_PAX_PAGEEXEC
11661+#ifdef CONFIG_PAX_DLRESOLVE
11662+static void pax_emuplt_close(struct vm_area_struct *vma)
11663+{
11664+ vma->vm_mm->call_dl_resolve = 0UL;
11665+}
11666+
11667+static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
11668+{
11669+ unsigned int *kaddr;
11670+
11671+ vmf->page = alloc_page(GFP_HIGHUSER);
11672+ if (!vmf->page)
11673+ return VM_FAULT_OOM;
11674+
11675+ kaddr = kmap(vmf->page);
11676+ memset(kaddr, 0, PAGE_SIZE);
11677+ kaddr[0] = 0x9DE3BFA8U; /* save */
11678+ flush_dcache_page(vmf->page);
11679+ kunmap(vmf->page);
11680+ return VM_FAULT_MAJOR;
11681+}
11682+
11683+static const struct vm_operations_struct pax_vm_ops = {
11684+ .close = pax_emuplt_close,
11685+ .fault = pax_emuplt_fault
11686+};
11687+
11688+static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
11689+{
11690+ int ret;
11691+
11692+ INIT_LIST_HEAD(&vma->anon_vma_chain);
11693+ vma->vm_mm = current->mm;
11694+ vma->vm_start = addr;
11695+ vma->vm_end = addr + PAGE_SIZE;
11696+ vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
11697+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
11698+ vma->vm_ops = &pax_vm_ops;
11699+
11700+ ret = insert_vm_struct(current->mm, vma);
11701+ if (ret)
11702+ return ret;
11703+
11704+ ++current->mm->total_vm;
11705+ return 0;
11706+}
11707+#endif
11708+
11709+/*
11710+ * PaX: decide what to do with offenders (regs->tpc = fault address)
11711+ *
11712+ * returns 1 when task should be killed
11713+ * 2 when patched PLT trampoline was detected
11714+ * 3 when unpatched PLT trampoline was detected
11715+ */
11716+static int pax_handle_fetch_fault(struct pt_regs *regs)
11717+{
11718+
11719+#ifdef CONFIG_PAX_EMUPLT
11720+ int err;
11721+
11722+ do { /* PaX: patched PLT emulation #1 */
11723+ unsigned int sethi1, sethi2, jmpl;
11724+
11725+ err = get_user(sethi1, (unsigned int *)regs->tpc);
11726+ err |= get_user(sethi2, (unsigned int *)(regs->tpc+4));
11727+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+8));
11728+
11729+ if (err)
11730+ break;
11731+
11732+ if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
11733+ (sethi2 & 0xFFC00000U) == 0x03000000U &&
11734+ (jmpl & 0xFFFFE000U) == 0x81C06000U)
11735+ {
11736+ unsigned long addr;
11737+
11738+ regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
11739+ addr = regs->u_regs[UREG_G1];
11740+ addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
11741+
11742+ if (test_thread_flag(TIF_32BIT))
11743+ addr &= 0xFFFFFFFFUL;
11744+
11745+ regs->tpc = addr;
11746+ regs->tnpc = addr+4;
11747+ return 2;
11748+ }
11749+ } while (0);
11750+
11751+ do { /* PaX: patched PLT emulation #2 */
11752+ unsigned int ba;
11753+
11754+ err = get_user(ba, (unsigned int *)regs->tpc);
11755+
11756+ if (err)
11757+ break;
11758+
11759+ if ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30480000U) {
11760+ unsigned long addr;
11761+
11762+ if ((ba & 0xFFC00000U) == 0x30800000U)
11763+ addr = regs->tpc + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
11764+ else
11765+ addr = regs->tpc + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
11766+
11767+ if (test_thread_flag(TIF_32BIT))
11768+ addr &= 0xFFFFFFFFUL;
11769+
11770+ regs->tpc = addr;
11771+ regs->tnpc = addr+4;
11772+ return 2;
11773+ }
11774+ } while (0);
11775+
11776+ do { /* PaX: patched PLT emulation #3 */
11777+ unsigned int sethi, bajmpl, nop;
11778+
11779+ err = get_user(sethi, (unsigned int *)regs->tpc);
11780+ err |= get_user(bajmpl, (unsigned int *)(regs->tpc+4));
11781+ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
11782+
11783+ if (err)
11784+ break;
11785+
11786+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
11787+ ((bajmpl & 0xFFFFE000U) == 0x81C06000U || (bajmpl & 0xFFF80000U) == 0x30480000U) &&
11788+ nop == 0x01000000U)
11789+ {
11790+ unsigned long addr;
11791+
11792+ addr = (sethi & 0x003FFFFFU) << 10;
11793+ regs->u_regs[UREG_G1] = addr;
11794+ if ((bajmpl & 0xFFFFE000U) == 0x81C06000U)
11795+ addr += (((bajmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
11796+ else
11797+ addr = regs->tpc + ((((bajmpl | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
11798+
11799+ if (test_thread_flag(TIF_32BIT))
11800+ addr &= 0xFFFFFFFFUL;
11801+
11802+ regs->tpc = addr;
11803+ regs->tnpc = addr+4;
11804+ return 2;
11805+ }
11806+ } while (0);
11807+
11808+ do { /* PaX: patched PLT emulation #4 */
11809+ unsigned int sethi, mov1, call, mov2;
11810+
11811+ err = get_user(sethi, (unsigned int *)regs->tpc);
11812+ err |= get_user(mov1, (unsigned int *)(regs->tpc+4));
11813+ err |= get_user(call, (unsigned int *)(regs->tpc+8));
11814+ err |= get_user(mov2, (unsigned int *)(regs->tpc+12));
11815+
11816+ if (err)
11817+ break;
11818+
11819+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
11820+ mov1 == 0x8210000FU &&
11821+ (call & 0xC0000000U) == 0x40000000U &&
11822+ mov2 == 0x9E100001U)
11823+ {
11824+ unsigned long addr;
11825+
11826+ regs->u_regs[UREG_G1] = regs->u_regs[UREG_RETPC];
11827+ addr = regs->tpc + 4 + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
11828+
11829+ if (test_thread_flag(TIF_32BIT))
11830+ addr &= 0xFFFFFFFFUL;
11831+
11832+ regs->tpc = addr;
11833+ regs->tnpc = addr+4;
11834+ return 2;
11835+ }
11836+ } while (0);
11837+
11838+ do { /* PaX: patched PLT emulation #5 */
11839+ unsigned int sethi, sethi1, sethi2, or1, or2, sllx, jmpl, nop;
11840+
11841+ err = get_user(sethi, (unsigned int *)regs->tpc);
11842+ err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
11843+ err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
11844+ err |= get_user(or1, (unsigned int *)(regs->tpc+12));
11845+ err |= get_user(or2, (unsigned int *)(regs->tpc+16));
11846+ err |= get_user(sllx, (unsigned int *)(regs->tpc+20));
11847+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+24));
11848+ err |= get_user(nop, (unsigned int *)(regs->tpc+28));
11849+
11850+ if (err)
11851+ break;
11852+
11853+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
11854+ (sethi1 & 0xFFC00000U) == 0x03000000U &&
11855+ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
11856+ (or1 & 0xFFFFE000U) == 0x82106000U &&
11857+ (or2 & 0xFFFFE000U) == 0x8A116000U &&
11858+ sllx == 0x83287020U &&
11859+ jmpl == 0x81C04005U &&
11860+ nop == 0x01000000U)
11861+ {
11862+ unsigned long addr;
11863+
11864+ regs->u_regs[UREG_G1] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
11865+ regs->u_regs[UREG_G1] <<= 32;
11866+ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
11867+ addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
11868+ regs->tpc = addr;
11869+ regs->tnpc = addr+4;
11870+ return 2;
11871+ }
11872+ } while (0);
11873+
11874+ do { /* PaX: patched PLT emulation #6 */
11875+ unsigned int sethi, sethi1, sethi2, sllx, or, jmpl, nop;
11876+
11877+ err = get_user(sethi, (unsigned int *)regs->tpc);
11878+ err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
11879+ err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
11880+ err |= get_user(sllx, (unsigned int *)(regs->tpc+12));
11881+ err |= get_user(or, (unsigned int *)(regs->tpc+16));
11882+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+20));
11883+ err |= get_user(nop, (unsigned int *)(regs->tpc+24));
11884+
11885+ if (err)
11886+ break;
11887+
11888+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
11889+ (sethi1 & 0xFFC00000U) == 0x03000000U &&
11890+ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
11891+ sllx == 0x83287020U &&
11892+ (or & 0xFFFFE000U) == 0x8A116000U &&
11893+ jmpl == 0x81C04005U &&
11894+ nop == 0x01000000U)
11895+ {
11896+ unsigned long addr;
11897+
11898+ regs->u_regs[UREG_G1] = (sethi1 & 0x003FFFFFU) << 10;
11899+ regs->u_regs[UREG_G1] <<= 32;
11900+ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or & 0x3FFU);
11901+ addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
11902+ regs->tpc = addr;
11903+ regs->tnpc = addr+4;
11904+ return 2;
11905+ }
11906+ } while (0);
11907+
11908+ do { /* PaX: unpatched PLT emulation step 1 */
11909+ unsigned int sethi, ba, nop;
11910+
11911+ err = get_user(sethi, (unsigned int *)regs->tpc);
11912+ err |= get_user(ba, (unsigned int *)(regs->tpc+4));
11913+ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
11914+
11915+ if (err)
11916+ break;
11917+
11918+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
11919+ ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
11920+ nop == 0x01000000U)
11921+ {
11922+ unsigned long addr;
11923+ unsigned int save, call;
11924+ unsigned int sethi1, sethi2, or1, or2, sllx, add, jmpl;
11925+
11926+ if ((ba & 0xFFC00000U) == 0x30800000U)
11927+ addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
11928+ else
11929+ addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
11930+
11931+ if (test_thread_flag(TIF_32BIT))
11932+ addr &= 0xFFFFFFFFUL;
11933+
11934+ err = get_user(save, (unsigned int *)addr);
11935+ err |= get_user(call, (unsigned int *)(addr+4));
11936+ err |= get_user(nop, (unsigned int *)(addr+8));
11937+ if (err)
11938+ break;
11939+
11940+#ifdef CONFIG_PAX_DLRESOLVE
11941+ if (save == 0x9DE3BFA8U &&
11942+ (call & 0xC0000000U) == 0x40000000U &&
11943+ nop == 0x01000000U)
11944+ {
11945+ struct vm_area_struct *vma;
11946+ unsigned long call_dl_resolve;
11947+
11948+ down_read(&current->mm->mmap_sem);
11949+ call_dl_resolve = current->mm->call_dl_resolve;
11950+ up_read(&current->mm->mmap_sem);
11951+ if (likely(call_dl_resolve))
11952+ goto emulate;
11953+
11954+ vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
11955+
11956+ down_write(&current->mm->mmap_sem);
11957+ if (current->mm->call_dl_resolve) {
11958+ call_dl_resolve = current->mm->call_dl_resolve;
11959+ up_write(&current->mm->mmap_sem);
11960+ if (vma)
11961+ kmem_cache_free(vm_area_cachep, vma);
11962+ goto emulate;
11963+ }
11964+
11965+ call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
11966+ if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
11967+ up_write(&current->mm->mmap_sem);
11968+ if (vma)
11969+ kmem_cache_free(vm_area_cachep, vma);
11970+ return 1;
11971+ }
11972+
11973+ if (pax_insert_vma(vma, call_dl_resolve)) {
11974+ up_write(&current->mm->mmap_sem);
11975+ kmem_cache_free(vm_area_cachep, vma);
11976+ return 1;
11977+ }
11978+
11979+ current->mm->call_dl_resolve = call_dl_resolve;
11980+ up_write(&current->mm->mmap_sem);
11981+
11982+emulate:
11983+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
11984+ regs->tpc = call_dl_resolve;
11985+ regs->tnpc = addr+4;
11986+ return 3;
11987+ }
11988+#endif
11989+
11990+ /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
11991+ if ((save & 0xFFC00000U) == 0x05000000U &&
11992+ (call & 0xFFFFE000U) == 0x85C0A000U &&
11993+ nop == 0x01000000U)
11994+ {
11995+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
11996+ regs->u_regs[UREG_G2] = addr + 4;
11997+ addr = (save & 0x003FFFFFU) << 10;
11998+ addr += (((call | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
11999+
12000+ if (test_thread_flag(TIF_32BIT))
12001+ addr &= 0xFFFFFFFFUL;
12002+
12003+ regs->tpc = addr;
12004+ regs->tnpc = addr+4;
12005+ return 3;
12006+ }
12007+
12008+ /* PaX: 64-bit PLT stub */
12009+ err = get_user(sethi1, (unsigned int *)addr);
12010+ err |= get_user(sethi2, (unsigned int *)(addr+4));
12011+ err |= get_user(or1, (unsigned int *)(addr+8));
12012+ err |= get_user(or2, (unsigned int *)(addr+12));
12013+ err |= get_user(sllx, (unsigned int *)(addr+16));
12014+ err |= get_user(add, (unsigned int *)(addr+20));
12015+ err |= get_user(jmpl, (unsigned int *)(addr+24));
12016+ err |= get_user(nop, (unsigned int *)(addr+28));
12017+ if (err)
12018+ break;
12019+
12020+ if ((sethi1 & 0xFFC00000U) == 0x09000000U &&
12021+ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
12022+ (or1 & 0xFFFFE000U) == 0x88112000U &&
12023+ (or2 & 0xFFFFE000U) == 0x8A116000U &&
12024+ sllx == 0x89293020U &&
12025+ add == 0x8A010005U &&
12026+ jmpl == 0x89C14000U &&
12027+ nop == 0x01000000U)
12028+ {
12029+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
12030+ regs->u_regs[UREG_G4] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
12031+ regs->u_regs[UREG_G4] <<= 32;
12032+ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
12033+ regs->u_regs[UREG_G5] += regs->u_regs[UREG_G4];
12034+ regs->u_regs[UREG_G4] = addr + 24;
12035+ addr = regs->u_regs[UREG_G5];
12036+ regs->tpc = addr;
12037+ regs->tnpc = addr+4;
12038+ return 3;
12039+ }
12040+ }
12041+ } while (0);
12042+
12043+#ifdef CONFIG_PAX_DLRESOLVE
12044+ do { /* PaX: unpatched PLT emulation step 2 */
12045+ unsigned int save, call, nop;
12046+
12047+ err = get_user(save, (unsigned int *)(regs->tpc-4));
12048+ err |= get_user(call, (unsigned int *)regs->tpc);
12049+ err |= get_user(nop, (unsigned int *)(regs->tpc+4));
12050+ if (err)
12051+ break;
12052+
12053+ if (save == 0x9DE3BFA8U &&
12054+ (call & 0xC0000000U) == 0x40000000U &&
12055+ nop == 0x01000000U)
12056+ {
12057+ unsigned long dl_resolve = regs->tpc + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
12058+
12059+ if (test_thread_flag(TIF_32BIT))
12060+ dl_resolve &= 0xFFFFFFFFUL;
12061+
12062+ regs->u_regs[UREG_RETPC] = regs->tpc;
12063+ regs->tpc = dl_resolve;
12064+ regs->tnpc = dl_resolve+4;
12065+ return 3;
12066+ }
12067+ } while (0);
12068+#endif
12069+
12070+ do { /* PaX: patched PLT emulation #7, must be AFTER the unpatched PLT emulation */
12071+ unsigned int sethi, ba, nop;
12072+
12073+ err = get_user(sethi, (unsigned int *)regs->tpc);
12074+ err |= get_user(ba, (unsigned int *)(regs->tpc+4));
12075+ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
12076+
12077+ if (err)
12078+ break;
12079+
12080+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
12081+ (ba & 0xFFF00000U) == 0x30600000U &&
12082+ nop == 0x01000000U)
12083+ {
12084+ unsigned long addr;
12085+
12086+ addr = (sethi & 0x003FFFFFU) << 10;
12087+ regs->u_regs[UREG_G1] = addr;
12088+ addr = regs->tpc + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
12089+
12090+ if (test_thread_flag(TIF_32BIT))
12091+ addr &= 0xFFFFFFFFUL;
12092+
12093+ regs->tpc = addr;
12094+ regs->tnpc = addr+4;
12095+ return 2;
12096+ }
12097+ } while (0);
12098+
12099+#endif
12100+
12101+ return 1;
12102+}
12103+
12104+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
12105+{
12106+ unsigned long i;
12107+
12108+ printk(KERN_ERR "PAX: bytes at PC: ");
12109+ for (i = 0; i < 8; i++) {
12110+ unsigned int c;
12111+ if (get_user(c, (unsigned int *)pc+i))
12112+ printk(KERN_CONT "???????? ");
12113+ else
12114+ printk(KERN_CONT "%08x ", c);
12115+ }
12116+ printk("\n");
12117+}
12118+#endif
12119+
12120 asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
12121 {
12122 enum ctx_state prev_state = exception_enter();
12123@@ -353,6 +816,29 @@ retry:
12124 if (!vma)
12125 goto bad_area;
12126
12127+#ifdef CONFIG_PAX_PAGEEXEC
12128+ /* PaX: detect ITLB misses on non-exec pages */
12129+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && vma->vm_start <= address &&
12130+ !(vma->vm_flags & VM_EXEC) && (fault_code & FAULT_CODE_ITLB))
12131+ {
12132+ if (address != regs->tpc)
12133+ goto good_area;
12134+
12135+ up_read(&mm->mmap_sem);
12136+ switch (pax_handle_fetch_fault(regs)) {
12137+
12138+#ifdef CONFIG_PAX_EMUPLT
12139+ case 2:
12140+ case 3:
12141+ return;
12142+#endif
12143+
12144+ }
12145+ pax_report_fault(regs, (void *)regs->tpc, (void *)(regs->u_regs[UREG_FP] + STACK_BIAS));
12146+ do_group_exit(SIGKILL);
12147+ }
12148+#endif
12149+
12150 /* Pure DTLB misses do not tell us whether the fault causing
12151 * load/store/atomic was a write or not, it only says that there
12152 * was no match. So in such a case we (carefully) read the
12153diff --git a/arch/sparc/mm/hugetlbpage.c b/arch/sparc/mm/hugetlbpage.c
12154index 4242eab..9ae6360 100644
12155--- a/arch/sparc/mm/hugetlbpage.c
12156+++ b/arch/sparc/mm/hugetlbpage.c
12157@@ -25,8 +25,10 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *filp,
12158 unsigned long addr,
12159 unsigned long len,
12160 unsigned long pgoff,
12161- unsigned long flags)
12162+ unsigned long flags,
12163+ unsigned long offset)
12164 {
12165+ struct mm_struct *mm = current->mm;
12166 unsigned long task_size = TASK_SIZE;
12167 struct vm_unmapped_area_info info;
12168
12169@@ -35,15 +37,22 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *filp,
12170
12171 info.flags = 0;
12172 info.length = len;
12173- info.low_limit = TASK_UNMAPPED_BASE;
12174+ info.low_limit = mm->mmap_base;
12175 info.high_limit = min(task_size, VA_EXCLUDE_START);
12176 info.align_mask = PAGE_MASK & ~HPAGE_MASK;
12177 info.align_offset = 0;
12178+ info.threadstack_offset = offset;
12179 addr = vm_unmapped_area(&info);
12180
12181 if ((addr & ~PAGE_MASK) && task_size > VA_EXCLUDE_END) {
12182 VM_BUG_ON(addr != -ENOMEM);
12183 info.low_limit = VA_EXCLUDE_END;
12184+
12185+#ifdef CONFIG_PAX_RANDMMAP
12186+ if (mm->pax_flags & MF_PAX_RANDMMAP)
12187+ info.low_limit += mm->delta_mmap;
12188+#endif
12189+
12190 info.high_limit = task_size;
12191 addr = vm_unmapped_area(&info);
12192 }
12193@@ -55,7 +64,8 @@ static unsigned long
12194 hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
12195 const unsigned long len,
12196 const unsigned long pgoff,
12197- const unsigned long flags)
12198+ const unsigned long flags,
12199+ const unsigned long offset)
12200 {
12201 struct mm_struct *mm = current->mm;
12202 unsigned long addr = addr0;
12203@@ -70,6 +80,7 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
12204 info.high_limit = mm->mmap_base;
12205 info.align_mask = PAGE_MASK & ~HPAGE_MASK;
12206 info.align_offset = 0;
12207+ info.threadstack_offset = offset;
12208 addr = vm_unmapped_area(&info);
12209
12210 /*
12211@@ -82,6 +93,12 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
12212 VM_BUG_ON(addr != -ENOMEM);
12213 info.flags = 0;
12214 info.low_limit = TASK_UNMAPPED_BASE;
12215+
12216+#ifdef CONFIG_PAX_RANDMMAP
12217+ if (mm->pax_flags & MF_PAX_RANDMMAP)
12218+ info.low_limit += mm->delta_mmap;
12219+#endif
12220+
12221 info.high_limit = STACK_TOP32;
12222 addr = vm_unmapped_area(&info);
12223 }
12224@@ -96,6 +113,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
12225 struct mm_struct *mm = current->mm;
12226 struct vm_area_struct *vma;
12227 unsigned long task_size = TASK_SIZE;
12228+ unsigned long offset = gr_rand_threadstack_offset(mm, file, flags);
12229
12230 if (test_thread_flag(TIF_32BIT))
12231 task_size = STACK_TOP32;
12232@@ -111,19 +129,22 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
12233 return addr;
12234 }
12235
12236+#ifdef CONFIG_PAX_RANDMMAP
12237+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
12238+#endif
12239+
12240 if (addr) {
12241 addr = ALIGN(addr, HPAGE_SIZE);
12242 vma = find_vma(mm, addr);
12243- if (task_size - len >= addr &&
12244- (!vma || addr + len <= vma->vm_start))
12245+ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
12246 return addr;
12247 }
12248 if (mm->get_unmapped_area == arch_get_unmapped_area)
12249 return hugetlb_get_unmapped_area_bottomup(file, addr, len,
12250- pgoff, flags);
12251+ pgoff, flags, offset);
12252 else
12253 return hugetlb_get_unmapped_area_topdown(file, addr, len,
12254- pgoff, flags);
12255+ pgoff, flags, offset);
12256 }
12257
12258 pte_t *huge_pte_alloc(struct mm_struct *mm,
12259diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c
12260index 4ca0d6b..e89bca1 100644
12261--- a/arch/sparc/mm/init_64.c
12262+++ b/arch/sparc/mm/init_64.c
12263@@ -186,9 +186,9 @@ unsigned long sparc64_kern_sec_context __read_mostly;
12264 int num_kernel_image_mappings;
12265
12266 #ifdef CONFIG_DEBUG_DCFLUSH
12267-atomic_t dcpage_flushes = ATOMIC_INIT(0);
12268+atomic_unchecked_t dcpage_flushes = ATOMIC_INIT(0);
12269 #ifdef CONFIG_SMP
12270-atomic_t dcpage_flushes_xcall = ATOMIC_INIT(0);
12271+atomic_unchecked_t dcpage_flushes_xcall = ATOMIC_INIT(0);
12272 #endif
12273 #endif
12274
12275@@ -196,7 +196,7 @@ inline void flush_dcache_page_impl(struct page *page)
12276 {
12277 BUG_ON(tlb_type == hypervisor);
12278 #ifdef CONFIG_DEBUG_DCFLUSH
12279- atomic_inc(&dcpage_flushes);
12280+ atomic_inc_unchecked(&dcpage_flushes);
12281 #endif
12282
12283 #ifdef DCACHE_ALIASING_POSSIBLE
12284@@ -468,10 +468,10 @@ void mmu_info(struct seq_file *m)
12285
12286 #ifdef CONFIG_DEBUG_DCFLUSH
12287 seq_printf(m, "DCPageFlushes\t: %d\n",
12288- atomic_read(&dcpage_flushes));
12289+ atomic_read_unchecked(&dcpage_flushes));
12290 #ifdef CONFIG_SMP
12291 seq_printf(m, "DCPageFlushesXC\t: %d\n",
12292- atomic_read(&dcpage_flushes_xcall));
12293+ atomic_read_unchecked(&dcpage_flushes_xcall));
12294 #endif /* CONFIG_SMP */
12295 #endif /* CONFIG_DEBUG_DCFLUSH */
12296 }
12297diff --git a/arch/tile/Kconfig b/arch/tile/Kconfig
12298index 7cca418..53fc030 100644
12299--- a/arch/tile/Kconfig
12300+++ b/arch/tile/Kconfig
12301@@ -192,6 +192,7 @@ source "kernel/Kconfig.hz"
12302
12303 config KEXEC
12304 bool "kexec system call"
12305+ depends on !GRKERNSEC_KMEM
12306 ---help---
12307 kexec is a system call that implements the ability to shutdown your
12308 current kernel, and to start another kernel. It is like a reboot
12309diff --git a/arch/tile/include/asm/atomic_64.h b/arch/tile/include/asm/atomic_64.h
12310index 7b11c5f..755a026 100644
12311--- a/arch/tile/include/asm/atomic_64.h
12312+++ b/arch/tile/include/asm/atomic_64.h
12313@@ -105,6 +105,16 @@ static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
12314
12315 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
12316
12317+#define atomic64_read_unchecked(v) atomic64_read(v)
12318+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
12319+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
12320+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
12321+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
12322+#define atomic64_inc_unchecked(v) atomic64_inc(v)
12323+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
12324+#define atomic64_dec_unchecked(v) atomic64_dec(v)
12325+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
12326+
12327 /* Define this to indicate that cmpxchg is an efficient operation. */
12328 #define __HAVE_ARCH_CMPXCHG
12329
12330diff --git a/arch/tile/include/asm/cache.h b/arch/tile/include/asm/cache.h
12331index 6160761..00cac88 100644
12332--- a/arch/tile/include/asm/cache.h
12333+++ b/arch/tile/include/asm/cache.h
12334@@ -15,11 +15,12 @@
12335 #ifndef _ASM_TILE_CACHE_H
12336 #define _ASM_TILE_CACHE_H
12337
12338+#include <linux/const.h>
12339 #include <arch/chip.h>
12340
12341 /* bytes per L1 data cache line */
12342 #define L1_CACHE_SHIFT CHIP_L1D_LOG_LINE_SIZE()
12343-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
12344+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
12345
12346 /* bytes per L2 cache line */
12347 #define L2_CACHE_SHIFT CHIP_L2_LOG_LINE_SIZE()
12348diff --git a/arch/tile/include/asm/uaccess.h b/arch/tile/include/asm/uaccess.h
12349index f41cb53..31d3ab4 100644
12350--- a/arch/tile/include/asm/uaccess.h
12351+++ b/arch/tile/include/asm/uaccess.h
12352@@ -417,9 +417,9 @@ static inline unsigned long __must_check copy_from_user(void *to,
12353 const void __user *from,
12354 unsigned long n)
12355 {
12356- int sz = __compiletime_object_size(to);
12357+ size_t sz = __compiletime_object_size(to);
12358
12359- if (likely(sz == -1 || sz >= n))
12360+ if (likely(sz == (size_t)-1 || sz >= n))
12361 n = _copy_from_user(to, from, n);
12362 else
12363 copy_from_user_overflow();
12364diff --git a/arch/tile/mm/hugetlbpage.c b/arch/tile/mm/hugetlbpage.c
12365index 8416240..a012fb7 100644
12366--- a/arch/tile/mm/hugetlbpage.c
12367+++ b/arch/tile/mm/hugetlbpage.c
12368@@ -179,6 +179,7 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
12369 info.high_limit = TASK_SIZE;
12370 info.align_mask = PAGE_MASK & ~huge_page_mask(h);
12371 info.align_offset = 0;
12372+ info.threadstack_offset = 0;
12373 return vm_unmapped_area(&info);
12374 }
12375
12376@@ -196,6 +197,7 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
12377 info.high_limit = current->mm->mmap_base;
12378 info.align_mask = PAGE_MASK & ~huge_page_mask(h);
12379 info.align_offset = 0;
12380+ info.threadstack_offset = 0;
12381 addr = vm_unmapped_area(&info);
12382
12383 /*
12384diff --git a/arch/um/Makefile b/arch/um/Makefile
12385index e4b1a96..16162f8 100644
12386--- a/arch/um/Makefile
12387+++ b/arch/um/Makefile
12388@@ -72,6 +72,10 @@ USER_CFLAGS = $(patsubst $(KERNEL_DEFINES),,$(patsubst -D__KERNEL__,,\
12389 $(patsubst -I%,,$(KBUILD_CFLAGS)))) $(ARCH_INCLUDE) $(MODE_INCLUDE) \
12390 $(filter -I%,$(CFLAGS)) -D_FILE_OFFSET_BITS=64 -idirafter include
12391
12392+ifdef CONSTIFY_PLUGIN
12393+USER_CFLAGS += -fplugin-arg-constify_plugin-no-constify
12394+endif
12395+
12396 #This will adjust *FLAGS accordingly to the platform.
12397 include $(srctree)/$(ARCH_DIR)/Makefile-os-$(OS)
12398
12399diff --git a/arch/um/include/asm/cache.h b/arch/um/include/asm/cache.h
12400index 19e1bdd..3665b77 100644
12401--- a/arch/um/include/asm/cache.h
12402+++ b/arch/um/include/asm/cache.h
12403@@ -1,6 +1,7 @@
12404 #ifndef __UM_CACHE_H
12405 #define __UM_CACHE_H
12406
12407+#include <linux/const.h>
12408
12409 #if defined(CONFIG_UML_X86) && !defined(CONFIG_64BIT)
12410 # define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT)
12411@@ -12,6 +13,6 @@
12412 # define L1_CACHE_SHIFT 5
12413 #endif
12414
12415-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
12416+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
12417
12418 #endif
12419diff --git a/arch/um/include/asm/kmap_types.h b/arch/um/include/asm/kmap_types.h
12420index 2e0a6b1..a64d0f5 100644
12421--- a/arch/um/include/asm/kmap_types.h
12422+++ b/arch/um/include/asm/kmap_types.h
12423@@ -8,6 +8,6 @@
12424
12425 /* No more #include "asm/arch/kmap_types.h" ! */
12426
12427-#define KM_TYPE_NR 14
12428+#define KM_TYPE_NR 15
12429
12430 #endif
12431diff --git a/arch/um/include/asm/page.h b/arch/um/include/asm/page.h
12432index 71c5d13..4c7b9f1 100644
12433--- a/arch/um/include/asm/page.h
12434+++ b/arch/um/include/asm/page.h
12435@@ -14,6 +14,9 @@
12436 #define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT)
12437 #define PAGE_MASK (~(PAGE_SIZE-1))
12438
12439+#define ktla_ktva(addr) (addr)
12440+#define ktva_ktla(addr) (addr)
12441+
12442 #ifndef __ASSEMBLY__
12443
12444 struct page;
12445diff --git a/arch/um/include/asm/pgtable-3level.h b/arch/um/include/asm/pgtable-3level.h
12446index 2b4274e..754fe06 100644
12447--- a/arch/um/include/asm/pgtable-3level.h
12448+++ b/arch/um/include/asm/pgtable-3level.h
12449@@ -58,6 +58,7 @@
12450 #define pud_present(x) (pud_val(x) & _PAGE_PRESENT)
12451 #define pud_populate(mm, pud, pmd) \
12452 set_pud(pud, __pud(_PAGE_TABLE + __pa(pmd)))
12453+#define pud_populate_kernel(mm, pud, pmd) pud_populate((mm), (pud), (pmd))
12454
12455 #ifdef CONFIG_64BIT
12456 #define set_pud(pudptr, pudval) set_64bit((u64 *) (pudptr), pud_val(pudval))
12457diff --git a/arch/um/kernel/process.c b/arch/um/kernel/process.c
12458index f17bca8..48adb87 100644
12459--- a/arch/um/kernel/process.c
12460+++ b/arch/um/kernel/process.c
12461@@ -356,22 +356,6 @@ int singlestepping(void * t)
12462 return 2;
12463 }
12464
12465-/*
12466- * Only x86 and x86_64 have an arch_align_stack().
12467- * All other arches have "#define arch_align_stack(x) (x)"
12468- * in their asm/exec.h
12469- * As this is included in UML from asm-um/system-generic.h,
12470- * we can use it to behave as the subarch does.
12471- */
12472-#ifndef arch_align_stack
12473-unsigned long arch_align_stack(unsigned long sp)
12474-{
12475- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
12476- sp -= get_random_int() % 8192;
12477- return sp & ~0xf;
12478-}
12479-#endif
12480-
12481 unsigned long get_wchan(struct task_struct *p)
12482 {
12483 unsigned long stack_page, sp, ip;
12484diff --git a/arch/unicore32/include/asm/cache.h b/arch/unicore32/include/asm/cache.h
12485index ad8f795..2c7eec6 100644
12486--- a/arch/unicore32/include/asm/cache.h
12487+++ b/arch/unicore32/include/asm/cache.h
12488@@ -12,8 +12,10 @@
12489 #ifndef __UNICORE_CACHE_H__
12490 #define __UNICORE_CACHE_H__
12491
12492-#define L1_CACHE_SHIFT (5)
12493-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
12494+#include <linux/const.h>
12495+
12496+#define L1_CACHE_SHIFT 5
12497+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
12498
12499 /*
12500 * Memory returned by kmalloc() may be used for DMA, so we must make
12501diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
12502index b7d31ca..9481ec5 100644
12503--- a/arch/x86/Kconfig
12504+++ b/arch/x86/Kconfig
12505@@ -132,7 +132,7 @@ config X86
12506 select RTC_LIB
12507 select HAVE_DEBUG_STACKOVERFLOW
12508 select HAVE_IRQ_EXIT_ON_IRQ_STACK if X86_64
12509- select HAVE_CC_STACKPROTECTOR
12510+ select HAVE_CC_STACKPROTECTOR if X86_64 || !PAX_MEMORY_UDEREF
12511 select GENERIC_CPU_AUTOPROBE
12512 select HAVE_ARCH_AUDITSYSCALL
12513 select ARCH_SUPPORTS_ATOMIC_RMW
12514@@ -266,7 +266,7 @@ config X86_HT
12515
12516 config X86_32_LAZY_GS
12517 def_bool y
12518- depends on X86_32 && !CC_STACKPROTECTOR
12519+ depends on X86_32 && !CC_STACKPROTECTOR && !PAX_MEMORY_UDEREF
12520
12521 config ARCH_HWEIGHT_CFLAGS
12522 string
12523@@ -632,6 +632,7 @@ config SCHED_OMIT_FRAME_POINTER
12524
12525 menuconfig HYPERVISOR_GUEST
12526 bool "Linux guest support"
12527+ depends on !GRKERNSEC_CONFIG_AUTO || GRKERNSEC_CONFIG_VIRT_GUEST || (GRKERNSEC_CONFIG_VIRT_HOST && GRKERNSEC_CONFIG_VIRT_XEN)
12528 ---help---
12529 Say Y here to enable options for running Linux under various hyper-
12530 visors. This option enables basic hypervisor detection and platform
12531@@ -1013,6 +1014,7 @@ config VM86
12532
12533 config X86_16BIT
12534 bool "Enable support for 16-bit segments" if EXPERT
12535+ depends on !GRKERNSEC
12536 default y
12537 ---help---
12538 This option is required by programs like Wine to run 16-bit
12539@@ -1186,6 +1188,7 @@ choice
12540
12541 config NOHIGHMEM
12542 bool "off"
12543+ depends on !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
12544 ---help---
12545 Linux can use up to 64 Gigabytes of physical memory on x86 systems.
12546 However, the address space of 32-bit x86 processors is only 4
12547@@ -1222,6 +1225,7 @@ config NOHIGHMEM
12548
12549 config HIGHMEM4G
12550 bool "4GB"
12551+ depends on !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
12552 ---help---
12553 Select this if you have a 32-bit processor and between 1 and 4
12554 gigabytes of physical RAM.
12555@@ -1274,7 +1278,7 @@ config PAGE_OFFSET
12556 hex
12557 default 0xB0000000 if VMSPLIT_3G_OPT
12558 default 0x80000000 if VMSPLIT_2G
12559- default 0x78000000 if VMSPLIT_2G_OPT
12560+ default 0x70000000 if VMSPLIT_2G_OPT
12561 default 0x40000000 if VMSPLIT_1G
12562 default 0xC0000000
12563 depends on X86_32
12564@@ -1715,6 +1719,7 @@ source kernel/Kconfig.hz
12565
12566 config KEXEC
12567 bool "kexec system call"
12568+ depends on !GRKERNSEC_KMEM
12569 ---help---
12570 kexec is a system call that implements the ability to shutdown your
12571 current kernel, and to start another kernel. It is like a reboot
12572@@ -1900,7 +1905,9 @@ config X86_NEED_RELOCS
12573
12574 config PHYSICAL_ALIGN
12575 hex "Alignment value to which kernel should be aligned"
12576- default "0x200000"
12577+ default "0x1000000"
12578+ range 0x200000 0x1000000 if PAX_KERNEXEC && X86_PAE
12579+ range 0x400000 0x1000000 if PAX_KERNEXEC && !X86_PAE
12580 range 0x2000 0x1000000 if X86_32
12581 range 0x200000 0x1000000 if X86_64
12582 ---help---
12583@@ -1983,6 +1990,7 @@ config COMPAT_VDSO
12584 def_bool n
12585 prompt "Disable the 32-bit vDSO (needed for glibc 2.3.3)"
12586 depends on X86_32 || IA32_EMULATION
12587+ depends on !PAX_PAGEEXEC && !PAX_SEGMEXEC && !PAX_KERNEXEC && !PAX_MEMORY_UDEREF
12588 ---help---
12589 Certain buggy versions of glibc will crash if they are
12590 presented with a 32-bit vDSO that is not mapped at the address
12591diff --git a/arch/x86/Kconfig.cpu b/arch/x86/Kconfig.cpu
12592index 6983314..54ad7e8 100644
12593--- a/arch/x86/Kconfig.cpu
12594+++ b/arch/x86/Kconfig.cpu
12595@@ -319,7 +319,7 @@ config X86_PPRO_FENCE
12596
12597 config X86_F00F_BUG
12598 def_bool y
12599- depends on M586MMX || M586TSC || M586 || M486
12600+ depends on (M586MMX || M586TSC || M586 || M486) && !PAX_KERNEXEC
12601
12602 config X86_INVD_BUG
12603 def_bool y
12604@@ -327,7 +327,7 @@ config X86_INVD_BUG
12605
12606 config X86_ALIGNMENT_16
12607 def_bool y
12608- depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || MELAN || MK6 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
12609+ depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || X86_ELAN || MK8 || MK7 || MK6 || MCORE2 || MPENTIUM4 || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
12610
12611 config X86_INTEL_USERCOPY
12612 def_bool y
12613@@ -369,7 +369,7 @@ config X86_CMPXCHG64
12614 # generates cmov.
12615 config X86_CMOV
12616 def_bool y
12617- depends on (MK8 || MK7 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX)
12618+ depends on (MK8 || MK7 || MCORE2 || MPSC || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX)
12619
12620 config X86_MINIMUM_CPU_FAMILY
12621 int
12622diff --git a/arch/x86/Kconfig.debug b/arch/x86/Kconfig.debug
12623index 20028da..88d5946 100644
12624--- a/arch/x86/Kconfig.debug
12625+++ b/arch/x86/Kconfig.debug
12626@@ -93,7 +93,7 @@ config EFI_PGT_DUMP
12627 config DEBUG_RODATA
12628 bool "Write protect kernel read-only data structures"
12629 default y
12630- depends on DEBUG_KERNEL
12631+ depends on DEBUG_KERNEL && BROKEN
12632 ---help---
12633 Mark the kernel read-only data as write-protected in the pagetables,
12634 in order to catch accidental (and incorrect) writes to such const
12635@@ -111,7 +111,7 @@ config DEBUG_RODATA_TEST
12636
12637 config DEBUG_SET_MODULE_RONX
12638 bool "Set loadable kernel module data as NX and text as RO"
12639- depends on MODULES
12640+ depends on MODULES && BROKEN
12641 ---help---
12642 This option helps catch unintended modifications to loadable
12643 kernel module's text and read-only data. It also prevents execution
12644diff --git a/arch/x86/Makefile b/arch/x86/Makefile
12645index 5ba2d9c..41e5bb6 100644
12646--- a/arch/x86/Makefile
12647+++ b/arch/x86/Makefile
12648@@ -65,9 +65,6 @@ ifeq ($(CONFIG_X86_32),y)
12649 # CPU-specific tuning. Anything which can be shared with UML should go here.
12650 include $(srctree)/arch/x86/Makefile_32.cpu
12651 KBUILD_CFLAGS += $(cflags-y)
12652-
12653- # temporary until string.h is fixed
12654- KBUILD_CFLAGS += -ffreestanding
12655 else
12656 BITS := 64
12657 UTS_MACHINE := x86_64
12658@@ -107,6 +104,9 @@ else
12659 KBUILD_CFLAGS += $(call cc-option,-maccumulate-outgoing-args)
12660 endif
12661
12662+# temporary until string.h is fixed
12663+KBUILD_CFLAGS += -ffreestanding
12664+
12665 # Make sure compiler does not have buggy stack-protector support.
12666 ifdef CONFIG_CC_STACKPROTECTOR
12667 cc_has_sp := $(srctree)/scripts/gcc-x86_$(BITS)-has-stack-protector.sh
12668@@ -181,6 +181,7 @@ archheaders:
12669 $(Q)$(MAKE) $(build)=arch/x86/syscalls all
12670
12671 archprepare:
12672+ $(if $(LDFLAGS_BUILD_ID),,$(error $(OLD_LD)))
12673 ifeq ($(CONFIG_KEXEC_FILE),y)
12674 $(Q)$(MAKE) $(build)=arch/x86/purgatory arch/x86/purgatory/kexec-purgatory.c
12675 endif
12676@@ -264,3 +265,9 @@ define archhelp
12677 echo ' FDARGS="..." arguments for the booted kernel'
12678 echo ' FDINITRD=file initrd for the booted kernel'
12679 endef
12680+
12681+define OLD_LD
12682+
12683+*** ${VERSION}.${PATCHLEVEL} PaX kernels no longer build correctly with old versions of binutils.
12684+*** Please upgrade your binutils to 2.18 or newer
12685+endef
12686diff --git a/arch/x86/boot/Makefile b/arch/x86/boot/Makefile
12687index 57bbf2f..b100fce 100644
12688--- a/arch/x86/boot/Makefile
12689+++ b/arch/x86/boot/Makefile
12690@@ -58,6 +58,9 @@ clean-files += cpustr.h
12691 # ---------------------------------------------------------------------------
12692
12693 KBUILD_CFLAGS := $(USERINCLUDE) $(REALMODE_CFLAGS) -D_SETUP
12694+ifdef CONSTIFY_PLUGIN
12695+KBUILD_CFLAGS += -fplugin-arg-constify_plugin-no-constify
12696+endif
12697 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
12698 GCOV_PROFILE := n
12699
12700diff --git a/arch/x86/boot/bitops.h b/arch/x86/boot/bitops.h
12701index 878e4b9..20537ab 100644
12702--- a/arch/x86/boot/bitops.h
12703+++ b/arch/x86/boot/bitops.h
12704@@ -26,7 +26,7 @@ static inline int variable_test_bit(int nr, const void *addr)
12705 u8 v;
12706 const u32 *p = (const u32 *)addr;
12707
12708- asm("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
12709+ asm volatile("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
12710 return v;
12711 }
12712
12713@@ -37,7 +37,7 @@ static inline int variable_test_bit(int nr, const void *addr)
12714
12715 static inline void set_bit(int nr, void *addr)
12716 {
12717- asm("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
12718+ asm volatile("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
12719 }
12720
12721 #endif /* BOOT_BITOPS_H */
12722diff --git a/arch/x86/boot/boot.h b/arch/x86/boot/boot.h
12723index bd49ec6..94c7f58 100644
12724--- a/arch/x86/boot/boot.h
12725+++ b/arch/x86/boot/boot.h
12726@@ -84,7 +84,7 @@ static inline void io_delay(void)
12727 static inline u16 ds(void)
12728 {
12729 u16 seg;
12730- asm("movw %%ds,%0" : "=rm" (seg));
12731+ asm volatile("movw %%ds,%0" : "=rm" (seg));
12732 return seg;
12733 }
12734
12735diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile
12736index 0a291cd..9686efc 100644
12737--- a/arch/x86/boot/compressed/Makefile
12738+++ b/arch/x86/boot/compressed/Makefile
12739@@ -30,6 +30,9 @@ KBUILD_CFLAGS += $(cflags-y)
12740 KBUILD_CFLAGS += -mno-mmx -mno-sse
12741 KBUILD_CFLAGS += $(call cc-option,-ffreestanding)
12742 KBUILD_CFLAGS += $(call cc-option,-fno-stack-protector)
12743+ifdef CONSTIFY_PLUGIN
12744+KBUILD_CFLAGS += -fplugin-arg-constify_plugin-no-constify
12745+endif
12746
12747 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
12748 GCOV_PROFILE := n
12749diff --git a/arch/x86/boot/compressed/efi_stub_32.S b/arch/x86/boot/compressed/efi_stub_32.S
12750index a53440e..c3dbf1e 100644
12751--- a/arch/x86/boot/compressed/efi_stub_32.S
12752+++ b/arch/x86/boot/compressed/efi_stub_32.S
12753@@ -46,16 +46,13 @@ ENTRY(efi_call_phys)
12754 * parameter 2, ..., param n. To make things easy, we save the return
12755 * address of efi_call_phys in a global variable.
12756 */
12757- popl %ecx
12758- movl %ecx, saved_return_addr(%edx)
12759- /* get the function pointer into ECX*/
12760- popl %ecx
12761- movl %ecx, efi_rt_function_ptr(%edx)
12762+ popl saved_return_addr(%edx)
12763+ popl efi_rt_function_ptr(%edx)
12764
12765 /*
12766 * 3. Call the physical function.
12767 */
12768- call *%ecx
12769+ call *efi_rt_function_ptr(%edx)
12770
12771 /*
12772 * 4. Balance the stack. And because EAX contain the return value,
12773@@ -67,15 +64,12 @@ ENTRY(efi_call_phys)
12774 1: popl %edx
12775 subl $1b, %edx
12776
12777- movl efi_rt_function_ptr(%edx), %ecx
12778- pushl %ecx
12779+ pushl efi_rt_function_ptr(%edx)
12780
12781 /*
12782 * 10. Push the saved return address onto the stack and return.
12783 */
12784- movl saved_return_addr(%edx), %ecx
12785- pushl %ecx
12786- ret
12787+ jmpl *saved_return_addr(%edx)
12788 ENDPROC(efi_call_phys)
12789 .previous
12790
12791diff --git a/arch/x86/boot/compressed/efi_thunk_64.S b/arch/x86/boot/compressed/efi_thunk_64.S
12792index 630384a..278e788 100644
12793--- a/arch/x86/boot/compressed/efi_thunk_64.S
12794+++ b/arch/x86/boot/compressed/efi_thunk_64.S
12795@@ -189,8 +189,8 @@ efi_gdt64:
12796 .long 0 /* Filled out by user */
12797 .word 0
12798 .quad 0x0000000000000000 /* NULL descriptor */
12799- .quad 0x00af9a000000ffff /* __KERNEL_CS */
12800- .quad 0x00cf92000000ffff /* __KERNEL_DS */
12801+ .quad 0x00af9b000000ffff /* __KERNEL_CS */
12802+ .quad 0x00cf93000000ffff /* __KERNEL_DS */
12803 .quad 0x0080890000000000 /* TS descriptor */
12804 .quad 0x0000000000000000 /* TS continued */
12805 efi_gdt64_end:
12806diff --git a/arch/x86/boot/compressed/head_32.S b/arch/x86/boot/compressed/head_32.S
12807index 1d7fbbc..36ecd58 100644
12808--- a/arch/x86/boot/compressed/head_32.S
12809+++ b/arch/x86/boot/compressed/head_32.S
12810@@ -140,10 +140,10 @@ preferred_addr:
12811 addl %eax, %ebx
12812 notl %eax
12813 andl %eax, %ebx
12814- cmpl $LOAD_PHYSICAL_ADDR, %ebx
12815+ cmpl $____LOAD_PHYSICAL_ADDR, %ebx
12816 jge 1f
12817 #endif
12818- movl $LOAD_PHYSICAL_ADDR, %ebx
12819+ movl $____LOAD_PHYSICAL_ADDR, %ebx
12820 1:
12821
12822 /* Target address to relocate to for decompression */
12823diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S
12824index 6b1766c..ad465c9 100644
12825--- a/arch/x86/boot/compressed/head_64.S
12826+++ b/arch/x86/boot/compressed/head_64.S
12827@@ -94,10 +94,10 @@ ENTRY(startup_32)
12828 addl %eax, %ebx
12829 notl %eax
12830 andl %eax, %ebx
12831- cmpl $LOAD_PHYSICAL_ADDR, %ebx
12832+ cmpl $____LOAD_PHYSICAL_ADDR, %ebx
12833 jge 1f
12834 #endif
12835- movl $LOAD_PHYSICAL_ADDR, %ebx
12836+ movl $____LOAD_PHYSICAL_ADDR, %ebx
12837 1:
12838
12839 /* Target address to relocate to for decompression */
12840@@ -322,10 +322,10 @@ preferred_addr:
12841 addq %rax, %rbp
12842 notq %rax
12843 andq %rax, %rbp
12844- cmpq $LOAD_PHYSICAL_ADDR, %rbp
12845+ cmpq $____LOAD_PHYSICAL_ADDR, %rbp
12846 jge 1f
12847 #endif
12848- movq $LOAD_PHYSICAL_ADDR, %rbp
12849+ movq $____LOAD_PHYSICAL_ADDR, %rbp
12850 1:
12851
12852 /* Target address to relocate to for decompression */
12853@@ -434,8 +434,8 @@ gdt:
12854 .long gdt
12855 .word 0
12856 .quad 0x0000000000000000 /* NULL descriptor */
12857- .quad 0x00af9a000000ffff /* __KERNEL_CS */
12858- .quad 0x00cf92000000ffff /* __KERNEL_DS */
12859+ .quad 0x00af9b000000ffff /* __KERNEL_CS */
12860+ .quad 0x00cf93000000ffff /* __KERNEL_DS */
12861 .quad 0x0080890000000000 /* TS descriptor */
12862 .quad 0x0000000000000000 /* TS continued */
12863 gdt_end:
12864diff --git a/arch/x86/boot/compressed/misc.c b/arch/x86/boot/compressed/misc.c
12865index a950864..c710239 100644
12866--- a/arch/x86/boot/compressed/misc.c
12867+++ b/arch/x86/boot/compressed/misc.c
12868@@ -242,7 +242,7 @@ static void handle_relocations(void *output, unsigned long output_len)
12869 * Calculate the delta between where vmlinux was linked to load
12870 * and where it was actually loaded.
12871 */
12872- delta = min_addr - LOAD_PHYSICAL_ADDR;
12873+ delta = min_addr - ____LOAD_PHYSICAL_ADDR;
12874 if (!delta) {
12875 debug_putstr("No relocation needed... ");
12876 return;
12877@@ -324,7 +324,7 @@ static void parse_elf(void *output)
12878 Elf32_Ehdr ehdr;
12879 Elf32_Phdr *phdrs, *phdr;
12880 #endif
12881- void *dest;
12882+ void *dest, *prev;
12883 int i;
12884
12885 memcpy(&ehdr, output, sizeof(ehdr));
12886@@ -351,13 +351,16 @@ static void parse_elf(void *output)
12887 case PT_LOAD:
12888 #ifdef CONFIG_RELOCATABLE
12889 dest = output;
12890- dest += (phdr->p_paddr - LOAD_PHYSICAL_ADDR);
12891+ dest += (phdr->p_paddr - ____LOAD_PHYSICAL_ADDR);
12892 #else
12893 dest = (void *)(phdr->p_paddr);
12894 #endif
12895 memcpy(dest,
12896 output + phdr->p_offset,
12897 phdr->p_filesz);
12898+ if (i)
12899+ memset(prev, 0xff, dest - prev);
12900+ prev = dest + phdr->p_filesz;
12901 break;
12902 default: /* Ignore other PT_* */ break;
12903 }
12904@@ -416,7 +419,7 @@ asmlinkage __visible void *decompress_kernel(void *rmode, memptr heap,
12905 error("Destination address too large");
12906 #endif
12907 #ifndef CONFIG_RELOCATABLE
12908- if ((unsigned long)output != LOAD_PHYSICAL_ADDR)
12909+ if ((unsigned long)output != ____LOAD_PHYSICAL_ADDR)
12910 error("Wrong destination address");
12911 #endif
12912
12913diff --git a/arch/x86/boot/cpucheck.c b/arch/x86/boot/cpucheck.c
12914index 1fd7d57..0f7d096 100644
12915--- a/arch/x86/boot/cpucheck.c
12916+++ b/arch/x86/boot/cpucheck.c
12917@@ -125,9 +125,9 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
12918 u32 ecx = MSR_K7_HWCR;
12919 u32 eax, edx;
12920
12921- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
12922+ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
12923 eax &= ~(1 << 15);
12924- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
12925+ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
12926
12927 get_cpuflags(); /* Make sure it really did something */
12928 err = check_cpuflags();
12929@@ -140,9 +140,9 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
12930 u32 ecx = MSR_VIA_FCR;
12931 u32 eax, edx;
12932
12933- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
12934+ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
12935 eax |= (1<<1)|(1<<7);
12936- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
12937+ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
12938
12939 set_bit(X86_FEATURE_CX8, cpu.flags);
12940 err = check_cpuflags();
12941@@ -153,12 +153,12 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
12942 u32 eax, edx;
12943 u32 level = 1;
12944
12945- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
12946- asm("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
12947- asm("cpuid"
12948+ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
12949+ asm volatile("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
12950+ asm volatile("cpuid"
12951 : "+a" (level), "=d" (cpu.flags[0])
12952 : : "ecx", "ebx");
12953- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
12954+ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
12955
12956 err = check_cpuflags();
12957 } else if (err == 0x01 &&
12958diff --git a/arch/x86/boot/header.S b/arch/x86/boot/header.S
12959index 16ef025..91e033b 100644
12960--- a/arch/x86/boot/header.S
12961+++ b/arch/x86/boot/header.S
12962@@ -438,10 +438,14 @@ setup_data: .quad 0 # 64-bit physical pointer to
12963 # single linked list of
12964 # struct setup_data
12965
12966-pref_address: .quad LOAD_PHYSICAL_ADDR # preferred load addr
12967+pref_address: .quad ____LOAD_PHYSICAL_ADDR # preferred load addr
12968
12969 #define ZO_INIT_SIZE (ZO__end - ZO_startup_32 + ZO_z_extract_offset)
12970+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
12971+#define VO_INIT_SIZE (VO__end - VO__text - __PAGE_OFFSET - ____LOAD_PHYSICAL_ADDR)
12972+#else
12973 #define VO_INIT_SIZE (VO__end - VO__text)
12974+#endif
12975 #if ZO_INIT_SIZE > VO_INIT_SIZE
12976 #define INIT_SIZE ZO_INIT_SIZE
12977 #else
12978diff --git a/arch/x86/boot/memory.c b/arch/x86/boot/memory.c
12979index db75d07..8e6d0af 100644
12980--- a/arch/x86/boot/memory.c
12981+++ b/arch/x86/boot/memory.c
12982@@ -19,7 +19,7 @@
12983
12984 static int detect_memory_e820(void)
12985 {
12986- int count = 0;
12987+ unsigned int count = 0;
12988 struct biosregs ireg, oreg;
12989 struct e820entry *desc = boot_params.e820_map;
12990 static struct e820entry buf; /* static so it is zeroed */
12991diff --git a/arch/x86/boot/video-vesa.c b/arch/x86/boot/video-vesa.c
12992index ba3e100..6501b8f 100644
12993--- a/arch/x86/boot/video-vesa.c
12994+++ b/arch/x86/boot/video-vesa.c
12995@@ -201,6 +201,7 @@ static void vesa_store_pm_info(void)
12996
12997 boot_params.screen_info.vesapm_seg = oreg.es;
12998 boot_params.screen_info.vesapm_off = oreg.di;
12999+ boot_params.screen_info.vesapm_size = oreg.cx;
13000 }
13001
13002 /*
13003diff --git a/arch/x86/boot/video.c b/arch/x86/boot/video.c
13004index 43eda28..5ab5fdb 100644
13005--- a/arch/x86/boot/video.c
13006+++ b/arch/x86/boot/video.c
13007@@ -96,7 +96,7 @@ static void store_mode_params(void)
13008 static unsigned int get_entry(void)
13009 {
13010 char entry_buf[4];
13011- int i, len = 0;
13012+ unsigned int i, len = 0;
13013 int key;
13014 unsigned int v;
13015
13016diff --git a/arch/x86/crypto/aes-x86_64-asm_64.S b/arch/x86/crypto/aes-x86_64-asm_64.S
13017index 9105655..41779c1 100644
13018--- a/arch/x86/crypto/aes-x86_64-asm_64.S
13019+++ b/arch/x86/crypto/aes-x86_64-asm_64.S
13020@@ -8,6 +8,8 @@
13021 * including this sentence is retained in full.
13022 */
13023
13024+#include <asm/alternative-asm.h>
13025+
13026 .extern crypto_ft_tab
13027 .extern crypto_it_tab
13028 .extern crypto_fl_tab
13029@@ -70,6 +72,8 @@
13030 je B192; \
13031 leaq 32(r9),r9;
13032
13033+#define ret pax_force_retaddr; ret
13034+
13035 #define epilogue(FUNC,r1,r2,r3,r4,r5,r6,r7,r8,r9) \
13036 movq r1,r2; \
13037 movq r3,r4; \
13038diff --git a/arch/x86/crypto/aesni-intel_asm.S b/arch/x86/crypto/aesni-intel_asm.S
13039index 6bd2c6c..368c93e 100644
13040--- a/arch/x86/crypto/aesni-intel_asm.S
13041+++ b/arch/x86/crypto/aesni-intel_asm.S
13042@@ -31,6 +31,7 @@
13043
13044 #include <linux/linkage.h>
13045 #include <asm/inst.h>
13046+#include <asm/alternative-asm.h>
13047
13048 /*
13049 * The following macros are used to move an (un)aligned 16 byte value to/from
13050@@ -217,7 +218,7 @@ enc: .octa 0x2
13051 * num_initial_blocks = b mod 4
13052 * encrypt the initial num_initial_blocks blocks and apply ghash on
13053 * the ciphertext
13054-* %r10, %r11, %r12, %rax, %xmm5, %xmm6, %xmm7, %xmm8, %xmm9 registers
13055+* %r10, %r11, %r15, %rax, %xmm5, %xmm6, %xmm7, %xmm8, %xmm9 registers
13056 * are clobbered
13057 * arg1, %arg2, %arg3, %r14 are used as a pointer only, not modified
13058 */
13059@@ -227,8 +228,8 @@ enc: .octa 0x2
13060 XMM2 XMM3 XMM4 XMMDst TMP6 TMP7 i i_seq operation
13061 MOVADQ SHUF_MASK(%rip), %xmm14
13062 mov arg7, %r10 # %r10 = AAD
13063- mov arg8, %r12 # %r12 = aadLen
13064- mov %r12, %r11
13065+ mov arg8, %r15 # %r15 = aadLen
13066+ mov %r15, %r11
13067 pxor %xmm\i, %xmm\i
13068
13069 _get_AAD_loop\num_initial_blocks\operation:
13070@@ -237,17 +238,17 @@ _get_AAD_loop\num_initial_blocks\operation:
13071 psrldq $4, %xmm\i
13072 pxor \TMP1, %xmm\i
13073 add $4, %r10
13074- sub $4, %r12
13075+ sub $4, %r15
13076 jne _get_AAD_loop\num_initial_blocks\operation
13077
13078 cmp $16, %r11
13079 je _get_AAD_loop2_done\num_initial_blocks\operation
13080
13081- mov $16, %r12
13082+ mov $16, %r15
13083 _get_AAD_loop2\num_initial_blocks\operation:
13084 psrldq $4, %xmm\i
13085- sub $4, %r12
13086- cmp %r11, %r12
13087+ sub $4, %r15
13088+ cmp %r11, %r15
13089 jne _get_AAD_loop2\num_initial_blocks\operation
13090
13091 _get_AAD_loop2_done\num_initial_blocks\operation:
13092@@ -442,7 +443,7 @@ _initial_blocks_done\num_initial_blocks\operation:
13093 * num_initial_blocks = b mod 4
13094 * encrypt the initial num_initial_blocks blocks and apply ghash on
13095 * the ciphertext
13096-* %r10, %r11, %r12, %rax, %xmm5, %xmm6, %xmm7, %xmm8, %xmm9 registers
13097+* %r10, %r11, %r15, %rax, %xmm5, %xmm6, %xmm7, %xmm8, %xmm9 registers
13098 * are clobbered
13099 * arg1, %arg2, %arg3, %r14 are used as a pointer only, not modified
13100 */
13101@@ -452,8 +453,8 @@ _initial_blocks_done\num_initial_blocks\operation:
13102 XMM2 XMM3 XMM4 XMMDst TMP6 TMP7 i i_seq operation
13103 MOVADQ SHUF_MASK(%rip), %xmm14
13104 mov arg7, %r10 # %r10 = AAD
13105- mov arg8, %r12 # %r12 = aadLen
13106- mov %r12, %r11
13107+ mov arg8, %r15 # %r15 = aadLen
13108+ mov %r15, %r11
13109 pxor %xmm\i, %xmm\i
13110 _get_AAD_loop\num_initial_blocks\operation:
13111 movd (%r10), \TMP1
13112@@ -461,15 +462,15 @@ _get_AAD_loop\num_initial_blocks\operation:
13113 psrldq $4, %xmm\i
13114 pxor \TMP1, %xmm\i
13115 add $4, %r10
13116- sub $4, %r12
13117+ sub $4, %r15
13118 jne _get_AAD_loop\num_initial_blocks\operation
13119 cmp $16, %r11
13120 je _get_AAD_loop2_done\num_initial_blocks\operation
13121- mov $16, %r12
13122+ mov $16, %r15
13123 _get_AAD_loop2\num_initial_blocks\operation:
13124 psrldq $4, %xmm\i
13125- sub $4, %r12
13126- cmp %r11, %r12
13127+ sub $4, %r15
13128+ cmp %r11, %r15
13129 jne _get_AAD_loop2\num_initial_blocks\operation
13130 _get_AAD_loop2_done\num_initial_blocks\operation:
13131 PSHUFB_XMM %xmm14, %xmm\i # byte-reflect the AAD data
13132@@ -1280,7 +1281,7 @@ _esb_loop_\@:
13133 *
13134 *****************************************************************************/
13135 ENTRY(aesni_gcm_dec)
13136- push %r12
13137+ push %r15
13138 push %r13
13139 push %r14
13140 mov %rsp, %r14
13141@@ -1290,8 +1291,8 @@ ENTRY(aesni_gcm_dec)
13142 */
13143 sub $VARIABLE_OFFSET, %rsp
13144 and $~63, %rsp # align rsp to 64 bytes
13145- mov %arg6, %r12
13146- movdqu (%r12), %xmm13 # %xmm13 = HashKey
13147+ mov %arg6, %r15
13148+ movdqu (%r15), %xmm13 # %xmm13 = HashKey
13149 movdqa SHUF_MASK(%rip), %xmm2
13150 PSHUFB_XMM %xmm2, %xmm13
13151
13152@@ -1319,10 +1320,10 @@ ENTRY(aesni_gcm_dec)
13153 movdqa %xmm13, HashKey(%rsp) # store HashKey<<1 (mod poly)
13154 mov %arg4, %r13 # save the number of bytes of plaintext/ciphertext
13155 and $-16, %r13 # %r13 = %r13 - (%r13 mod 16)
13156- mov %r13, %r12
13157- and $(3<<4), %r12
13158+ mov %r13, %r15
13159+ and $(3<<4), %r15
13160 jz _initial_num_blocks_is_0_decrypt
13161- cmp $(2<<4), %r12
13162+ cmp $(2<<4), %r15
13163 jb _initial_num_blocks_is_1_decrypt
13164 je _initial_num_blocks_is_2_decrypt
13165 _initial_num_blocks_is_3_decrypt:
13166@@ -1372,16 +1373,16 @@ _zero_cipher_left_decrypt:
13167 sub $16, %r11
13168 add %r13, %r11
13169 movdqu (%arg3,%r11,1), %xmm1 # receive the last <16 byte block
13170- lea SHIFT_MASK+16(%rip), %r12
13171- sub %r13, %r12
13172+ lea SHIFT_MASK+16(%rip), %r15
13173+ sub %r13, %r15
13174 # adjust the shuffle mask pointer to be able to shift 16-%r13 bytes
13175 # (%r13 is the number of bytes in plaintext mod 16)
13176- movdqu (%r12), %xmm2 # get the appropriate shuffle mask
13177+ movdqu (%r15), %xmm2 # get the appropriate shuffle mask
13178 PSHUFB_XMM %xmm2, %xmm1 # right shift 16-%r13 butes
13179
13180 movdqa %xmm1, %xmm2
13181 pxor %xmm1, %xmm0 # Ciphertext XOR E(K, Yn)
13182- movdqu ALL_F-SHIFT_MASK(%r12), %xmm1
13183+ movdqu ALL_F-SHIFT_MASK(%r15), %xmm1
13184 # get the appropriate mask to mask out top 16-%r13 bytes of %xmm0
13185 pand %xmm1, %xmm0 # mask out top 16-%r13 bytes of %xmm0
13186 pand %xmm1, %xmm2
13187@@ -1410,9 +1411,9 @@ _less_than_8_bytes_left_decrypt:
13188 sub $1, %r13
13189 jne _less_than_8_bytes_left_decrypt
13190 _multiple_of_16_bytes_decrypt:
13191- mov arg8, %r12 # %r13 = aadLen (number of bytes)
13192- shl $3, %r12 # convert into number of bits
13193- movd %r12d, %xmm15 # len(A) in %xmm15
13194+ mov arg8, %r15 # %r13 = aadLen (number of bytes)
13195+ shl $3, %r15 # convert into number of bits
13196+ movd %r15d, %xmm15 # len(A) in %xmm15
13197 shl $3, %arg4 # len(C) in bits (*128)
13198 MOVQ_R64_XMM %arg4, %xmm1
13199 pslldq $8, %xmm15 # %xmm15 = len(A)||0x0000000000000000
13200@@ -1451,7 +1452,8 @@ _return_T_done_decrypt:
13201 mov %r14, %rsp
13202 pop %r14
13203 pop %r13
13204- pop %r12
13205+ pop %r15
13206+ pax_force_retaddr
13207 ret
13208 ENDPROC(aesni_gcm_dec)
13209
13210@@ -1540,7 +1542,7 @@ ENDPROC(aesni_gcm_dec)
13211 * poly = x^128 + x^127 + x^126 + x^121 + 1
13212 ***************************************************************************/
13213 ENTRY(aesni_gcm_enc)
13214- push %r12
13215+ push %r15
13216 push %r13
13217 push %r14
13218 mov %rsp, %r14
13219@@ -1550,8 +1552,8 @@ ENTRY(aesni_gcm_enc)
13220 #
13221 sub $VARIABLE_OFFSET, %rsp
13222 and $~63, %rsp
13223- mov %arg6, %r12
13224- movdqu (%r12), %xmm13
13225+ mov %arg6, %r15
13226+ movdqu (%r15), %xmm13
13227 movdqa SHUF_MASK(%rip), %xmm2
13228 PSHUFB_XMM %xmm2, %xmm13
13229
13230@@ -1575,13 +1577,13 @@ ENTRY(aesni_gcm_enc)
13231 movdqa %xmm13, HashKey(%rsp)
13232 mov %arg4, %r13 # %xmm13 holds HashKey<<1 (mod poly)
13233 and $-16, %r13
13234- mov %r13, %r12
13235+ mov %r13, %r15
13236
13237 # Encrypt first few blocks
13238
13239- and $(3<<4), %r12
13240+ and $(3<<4), %r15
13241 jz _initial_num_blocks_is_0_encrypt
13242- cmp $(2<<4), %r12
13243+ cmp $(2<<4), %r15
13244 jb _initial_num_blocks_is_1_encrypt
13245 je _initial_num_blocks_is_2_encrypt
13246 _initial_num_blocks_is_3_encrypt:
13247@@ -1634,14 +1636,14 @@ _zero_cipher_left_encrypt:
13248 sub $16, %r11
13249 add %r13, %r11
13250 movdqu (%arg3,%r11,1), %xmm1 # receive the last <16 byte blocks
13251- lea SHIFT_MASK+16(%rip), %r12
13252- sub %r13, %r12
13253+ lea SHIFT_MASK+16(%rip), %r15
13254+ sub %r13, %r15
13255 # adjust the shuffle mask pointer to be able to shift 16-r13 bytes
13256 # (%r13 is the number of bytes in plaintext mod 16)
13257- movdqu (%r12), %xmm2 # get the appropriate shuffle mask
13258+ movdqu (%r15), %xmm2 # get the appropriate shuffle mask
13259 PSHUFB_XMM %xmm2, %xmm1 # shift right 16-r13 byte
13260 pxor %xmm1, %xmm0 # Plaintext XOR Encrypt(K, Yn)
13261- movdqu ALL_F-SHIFT_MASK(%r12), %xmm1
13262+ movdqu ALL_F-SHIFT_MASK(%r15), %xmm1
13263 # get the appropriate mask to mask out top 16-r13 bytes of xmm0
13264 pand %xmm1, %xmm0 # mask out top 16-r13 bytes of xmm0
13265 movdqa SHUF_MASK(%rip), %xmm10
13266@@ -1674,9 +1676,9 @@ _less_than_8_bytes_left_encrypt:
13267 sub $1, %r13
13268 jne _less_than_8_bytes_left_encrypt
13269 _multiple_of_16_bytes_encrypt:
13270- mov arg8, %r12 # %r12 = addLen (number of bytes)
13271- shl $3, %r12
13272- movd %r12d, %xmm15 # len(A) in %xmm15
13273+ mov arg8, %r15 # %r15 = addLen (number of bytes)
13274+ shl $3, %r15
13275+ movd %r15d, %xmm15 # len(A) in %xmm15
13276 shl $3, %arg4 # len(C) in bits (*128)
13277 MOVQ_R64_XMM %arg4, %xmm1
13278 pslldq $8, %xmm15 # %xmm15 = len(A)||0x0000000000000000
13279@@ -1715,7 +1717,8 @@ _return_T_done_encrypt:
13280 mov %r14, %rsp
13281 pop %r14
13282 pop %r13
13283- pop %r12
13284+ pop %r15
13285+ pax_force_retaddr
13286 ret
13287 ENDPROC(aesni_gcm_enc)
13288
13289@@ -1733,6 +1736,7 @@ _key_expansion_256a:
13290 pxor %xmm1, %xmm0
13291 movaps %xmm0, (TKEYP)
13292 add $0x10, TKEYP
13293+ pax_force_retaddr
13294 ret
13295 ENDPROC(_key_expansion_128)
13296 ENDPROC(_key_expansion_256a)
13297@@ -1759,6 +1763,7 @@ _key_expansion_192a:
13298 shufps $0b01001110, %xmm2, %xmm1
13299 movaps %xmm1, 0x10(TKEYP)
13300 add $0x20, TKEYP
13301+ pax_force_retaddr
13302 ret
13303 ENDPROC(_key_expansion_192a)
13304
13305@@ -1779,6 +1784,7 @@ _key_expansion_192b:
13306
13307 movaps %xmm0, (TKEYP)
13308 add $0x10, TKEYP
13309+ pax_force_retaddr
13310 ret
13311 ENDPROC(_key_expansion_192b)
13312
13313@@ -1792,6 +1798,7 @@ _key_expansion_256b:
13314 pxor %xmm1, %xmm2
13315 movaps %xmm2, (TKEYP)
13316 add $0x10, TKEYP
13317+ pax_force_retaddr
13318 ret
13319 ENDPROC(_key_expansion_256b)
13320
13321@@ -1905,6 +1912,7 @@ ENTRY(aesni_set_key)
13322 #ifndef __x86_64__
13323 popl KEYP
13324 #endif
13325+ pax_force_retaddr
13326 ret
13327 ENDPROC(aesni_set_key)
13328
13329@@ -1927,6 +1935,7 @@ ENTRY(aesni_enc)
13330 popl KLEN
13331 popl KEYP
13332 #endif
13333+ pax_force_retaddr
13334 ret
13335 ENDPROC(aesni_enc)
13336
13337@@ -1985,6 +1994,7 @@ _aesni_enc1:
13338 AESENC KEY STATE
13339 movaps 0x70(TKEYP), KEY
13340 AESENCLAST KEY STATE
13341+ pax_force_retaddr
13342 ret
13343 ENDPROC(_aesni_enc1)
13344
13345@@ -2094,6 +2104,7 @@ _aesni_enc4:
13346 AESENCLAST KEY STATE2
13347 AESENCLAST KEY STATE3
13348 AESENCLAST KEY STATE4
13349+ pax_force_retaddr
13350 ret
13351 ENDPROC(_aesni_enc4)
13352
13353@@ -2117,6 +2128,7 @@ ENTRY(aesni_dec)
13354 popl KLEN
13355 popl KEYP
13356 #endif
13357+ pax_force_retaddr
13358 ret
13359 ENDPROC(aesni_dec)
13360
13361@@ -2175,6 +2187,7 @@ _aesni_dec1:
13362 AESDEC KEY STATE
13363 movaps 0x70(TKEYP), KEY
13364 AESDECLAST KEY STATE
13365+ pax_force_retaddr
13366 ret
13367 ENDPROC(_aesni_dec1)
13368
13369@@ -2284,6 +2297,7 @@ _aesni_dec4:
13370 AESDECLAST KEY STATE2
13371 AESDECLAST KEY STATE3
13372 AESDECLAST KEY STATE4
13373+ pax_force_retaddr
13374 ret
13375 ENDPROC(_aesni_dec4)
13376
13377@@ -2342,6 +2356,7 @@ ENTRY(aesni_ecb_enc)
13378 popl KEYP
13379 popl LEN
13380 #endif
13381+ pax_force_retaddr
13382 ret
13383 ENDPROC(aesni_ecb_enc)
13384
13385@@ -2401,6 +2416,7 @@ ENTRY(aesni_ecb_dec)
13386 popl KEYP
13387 popl LEN
13388 #endif
13389+ pax_force_retaddr
13390 ret
13391 ENDPROC(aesni_ecb_dec)
13392
13393@@ -2443,6 +2459,7 @@ ENTRY(aesni_cbc_enc)
13394 popl LEN
13395 popl IVP
13396 #endif
13397+ pax_force_retaddr
13398 ret
13399 ENDPROC(aesni_cbc_enc)
13400
13401@@ -2534,6 +2551,7 @@ ENTRY(aesni_cbc_dec)
13402 popl LEN
13403 popl IVP
13404 #endif
13405+ pax_force_retaddr
13406 ret
13407 ENDPROC(aesni_cbc_dec)
13408
13409@@ -2561,6 +2579,7 @@ _aesni_inc_init:
13410 mov $1, TCTR_LOW
13411 MOVQ_R64_XMM TCTR_LOW INC
13412 MOVQ_R64_XMM CTR TCTR_LOW
13413+ pax_force_retaddr
13414 ret
13415 ENDPROC(_aesni_inc_init)
13416
13417@@ -2590,6 +2609,7 @@ _aesni_inc:
13418 .Linc_low:
13419 movaps CTR, IV
13420 PSHUFB_XMM BSWAP_MASK IV
13421+ pax_force_retaddr
13422 ret
13423 ENDPROC(_aesni_inc)
13424
13425@@ -2651,6 +2671,7 @@ ENTRY(aesni_ctr_enc)
13426 .Lctr_enc_ret:
13427 movups IV, (IVP)
13428 .Lctr_enc_just_ret:
13429+ pax_force_retaddr
13430 ret
13431 ENDPROC(aesni_ctr_enc)
13432
13433@@ -2777,6 +2798,7 @@ ENTRY(aesni_xts_crypt8)
13434 pxor INC, STATE4
13435 movdqu STATE4, 0x70(OUTP)
13436
13437+ pax_force_retaddr
13438 ret
13439 ENDPROC(aesni_xts_crypt8)
13440
13441diff --git a/arch/x86/crypto/blowfish-x86_64-asm_64.S b/arch/x86/crypto/blowfish-x86_64-asm_64.S
13442index 246c670..466e2d6 100644
13443--- a/arch/x86/crypto/blowfish-x86_64-asm_64.S
13444+++ b/arch/x86/crypto/blowfish-x86_64-asm_64.S
13445@@ -21,6 +21,7 @@
13446 */
13447
13448 #include <linux/linkage.h>
13449+#include <asm/alternative-asm.h>
13450
13451 .file "blowfish-x86_64-asm.S"
13452 .text
13453@@ -149,9 +150,11 @@ ENTRY(__blowfish_enc_blk)
13454 jnz .L__enc_xor;
13455
13456 write_block();
13457+ pax_force_retaddr
13458 ret;
13459 .L__enc_xor:
13460 xor_block();
13461+ pax_force_retaddr
13462 ret;
13463 ENDPROC(__blowfish_enc_blk)
13464
13465@@ -183,6 +186,7 @@ ENTRY(blowfish_dec_blk)
13466
13467 movq %r11, %rbp;
13468
13469+ pax_force_retaddr
13470 ret;
13471 ENDPROC(blowfish_dec_blk)
13472
13473@@ -334,6 +338,7 @@ ENTRY(__blowfish_enc_blk_4way)
13474
13475 popq %rbx;
13476 popq %rbp;
13477+ pax_force_retaddr
13478 ret;
13479
13480 .L__enc_xor4:
13481@@ -341,6 +346,7 @@ ENTRY(__blowfish_enc_blk_4way)
13482
13483 popq %rbx;
13484 popq %rbp;
13485+ pax_force_retaddr
13486 ret;
13487 ENDPROC(__blowfish_enc_blk_4way)
13488
13489@@ -375,5 +381,6 @@ ENTRY(blowfish_dec_blk_4way)
13490 popq %rbx;
13491 popq %rbp;
13492
13493+ pax_force_retaddr
13494 ret;
13495 ENDPROC(blowfish_dec_blk_4way)
13496diff --git a/arch/x86/crypto/camellia-aesni-avx-asm_64.S b/arch/x86/crypto/camellia-aesni-avx-asm_64.S
13497index ce71f92..1dce7ec 100644
13498--- a/arch/x86/crypto/camellia-aesni-avx-asm_64.S
13499+++ b/arch/x86/crypto/camellia-aesni-avx-asm_64.S
13500@@ -16,6 +16,7 @@
13501 */
13502
13503 #include <linux/linkage.h>
13504+#include <asm/alternative-asm.h>
13505
13506 #define CAMELLIA_TABLE_BYTE_LEN 272
13507
13508@@ -191,6 +192,7 @@ roundsm16_x0_x1_x2_x3_x4_x5_x6_x7_y0_y1_y2_y3_y4_y5_y6_y7_cd:
13509 roundsm16(%xmm0, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7,
13510 %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14, %xmm15,
13511 %rcx, (%r9));
13512+ pax_force_retaddr
13513 ret;
13514 ENDPROC(roundsm16_x0_x1_x2_x3_x4_x5_x6_x7_y0_y1_y2_y3_y4_y5_y6_y7_cd)
13515
13516@@ -199,6 +201,7 @@ roundsm16_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab:
13517 roundsm16(%xmm4, %xmm5, %xmm6, %xmm7, %xmm0, %xmm1, %xmm2, %xmm3,
13518 %xmm12, %xmm13, %xmm14, %xmm15, %xmm8, %xmm9, %xmm10, %xmm11,
13519 %rax, (%r9));
13520+ pax_force_retaddr
13521 ret;
13522 ENDPROC(roundsm16_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab)
13523
13524@@ -780,6 +783,7 @@ __camellia_enc_blk16:
13525 %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14,
13526 %xmm15, (key_table)(CTX, %r8, 8), (%rax), 1 * 16(%rax));
13527
13528+ pax_force_retaddr
13529 ret;
13530
13531 .align 8
13532@@ -865,6 +869,7 @@ __camellia_dec_blk16:
13533 %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14,
13534 %xmm15, (key_table)(CTX), (%rax), 1 * 16(%rax));
13535
13536+ pax_force_retaddr
13537 ret;
13538
13539 .align 8
13540@@ -904,6 +909,7 @@ ENTRY(camellia_ecb_enc_16way)
13541 %xmm15, %xmm14, %xmm13, %xmm12, %xmm11, %xmm10, %xmm9,
13542 %xmm8, %rsi);
13543
13544+ pax_force_retaddr
13545 ret;
13546 ENDPROC(camellia_ecb_enc_16way)
13547
13548@@ -932,6 +938,7 @@ ENTRY(camellia_ecb_dec_16way)
13549 %xmm15, %xmm14, %xmm13, %xmm12, %xmm11, %xmm10, %xmm9,
13550 %xmm8, %rsi);
13551
13552+ pax_force_retaddr
13553 ret;
13554 ENDPROC(camellia_ecb_dec_16way)
13555
13556@@ -981,6 +988,7 @@ ENTRY(camellia_cbc_dec_16way)
13557 %xmm15, %xmm14, %xmm13, %xmm12, %xmm11, %xmm10, %xmm9,
13558 %xmm8, %rsi);
13559
13560+ pax_force_retaddr
13561 ret;
13562 ENDPROC(camellia_cbc_dec_16way)
13563
13564@@ -1092,6 +1100,7 @@ ENTRY(camellia_ctr_16way)
13565 %xmm15, %xmm14, %xmm13, %xmm12, %xmm11, %xmm10, %xmm9,
13566 %xmm8, %rsi);
13567
13568+ pax_force_retaddr
13569 ret;
13570 ENDPROC(camellia_ctr_16way)
13571
13572@@ -1234,6 +1243,7 @@ camellia_xts_crypt_16way:
13573 %xmm15, %xmm14, %xmm13, %xmm12, %xmm11, %xmm10, %xmm9,
13574 %xmm8, %rsi);
13575
13576+ pax_force_retaddr
13577 ret;
13578 ENDPROC(camellia_xts_crypt_16way)
13579
13580diff --git a/arch/x86/crypto/camellia-aesni-avx2-asm_64.S b/arch/x86/crypto/camellia-aesni-avx2-asm_64.S
13581index 0e0b886..5a3123c 100644
13582--- a/arch/x86/crypto/camellia-aesni-avx2-asm_64.S
13583+++ b/arch/x86/crypto/camellia-aesni-avx2-asm_64.S
13584@@ -11,6 +11,7 @@
13585 */
13586
13587 #include <linux/linkage.h>
13588+#include <asm/alternative-asm.h>
13589
13590 #define CAMELLIA_TABLE_BYTE_LEN 272
13591
13592@@ -230,6 +231,7 @@ roundsm32_x0_x1_x2_x3_x4_x5_x6_x7_y0_y1_y2_y3_y4_y5_y6_y7_cd:
13593 roundsm32(%ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7,
13594 %ymm8, %ymm9, %ymm10, %ymm11, %ymm12, %ymm13, %ymm14, %ymm15,
13595 %rcx, (%r9));
13596+ pax_force_retaddr
13597 ret;
13598 ENDPROC(roundsm32_x0_x1_x2_x3_x4_x5_x6_x7_y0_y1_y2_y3_y4_y5_y6_y7_cd)
13599
13600@@ -238,6 +240,7 @@ roundsm32_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab:
13601 roundsm32(%ymm4, %ymm5, %ymm6, %ymm7, %ymm0, %ymm1, %ymm2, %ymm3,
13602 %ymm12, %ymm13, %ymm14, %ymm15, %ymm8, %ymm9, %ymm10, %ymm11,
13603 %rax, (%r9));
13604+ pax_force_retaddr
13605 ret;
13606 ENDPROC(roundsm32_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab)
13607
13608@@ -820,6 +823,7 @@ __camellia_enc_blk32:
13609 %ymm8, %ymm9, %ymm10, %ymm11, %ymm12, %ymm13, %ymm14,
13610 %ymm15, (key_table)(CTX, %r8, 8), (%rax), 1 * 32(%rax));
13611
13612+ pax_force_retaddr
13613 ret;
13614
13615 .align 8
13616@@ -905,6 +909,7 @@ __camellia_dec_blk32:
13617 %ymm8, %ymm9, %ymm10, %ymm11, %ymm12, %ymm13, %ymm14,
13618 %ymm15, (key_table)(CTX), (%rax), 1 * 32(%rax));
13619
13620+ pax_force_retaddr
13621 ret;
13622
13623 .align 8
13624@@ -948,6 +953,7 @@ ENTRY(camellia_ecb_enc_32way)
13625
13626 vzeroupper;
13627
13628+ pax_force_retaddr
13629 ret;
13630 ENDPROC(camellia_ecb_enc_32way)
13631
13632@@ -980,6 +986,7 @@ ENTRY(camellia_ecb_dec_32way)
13633
13634 vzeroupper;
13635
13636+ pax_force_retaddr
13637 ret;
13638 ENDPROC(camellia_ecb_dec_32way)
13639
13640@@ -1046,6 +1053,7 @@ ENTRY(camellia_cbc_dec_32way)
13641
13642 vzeroupper;
13643
13644+ pax_force_retaddr
13645 ret;
13646 ENDPROC(camellia_cbc_dec_32way)
13647
13648@@ -1184,6 +1192,7 @@ ENTRY(camellia_ctr_32way)
13649
13650 vzeroupper;
13651
13652+ pax_force_retaddr
13653 ret;
13654 ENDPROC(camellia_ctr_32way)
13655
13656@@ -1349,6 +1358,7 @@ camellia_xts_crypt_32way:
13657
13658 vzeroupper;
13659
13660+ pax_force_retaddr
13661 ret;
13662 ENDPROC(camellia_xts_crypt_32way)
13663
13664diff --git a/arch/x86/crypto/camellia-x86_64-asm_64.S b/arch/x86/crypto/camellia-x86_64-asm_64.S
13665index 310319c..db3d7b5 100644
13666--- a/arch/x86/crypto/camellia-x86_64-asm_64.S
13667+++ b/arch/x86/crypto/camellia-x86_64-asm_64.S
13668@@ -21,6 +21,7 @@
13669 */
13670
13671 #include <linux/linkage.h>
13672+#include <asm/alternative-asm.h>
13673
13674 .file "camellia-x86_64-asm_64.S"
13675 .text
13676@@ -228,12 +229,14 @@ ENTRY(__camellia_enc_blk)
13677 enc_outunpack(mov, RT1);
13678
13679 movq RRBP, %rbp;
13680+ pax_force_retaddr
13681 ret;
13682
13683 .L__enc_xor:
13684 enc_outunpack(xor, RT1);
13685
13686 movq RRBP, %rbp;
13687+ pax_force_retaddr
13688 ret;
13689 ENDPROC(__camellia_enc_blk)
13690
13691@@ -272,6 +275,7 @@ ENTRY(camellia_dec_blk)
13692 dec_outunpack();
13693
13694 movq RRBP, %rbp;
13695+ pax_force_retaddr
13696 ret;
13697 ENDPROC(camellia_dec_blk)
13698
13699@@ -463,6 +467,7 @@ ENTRY(__camellia_enc_blk_2way)
13700
13701 movq RRBP, %rbp;
13702 popq %rbx;
13703+ pax_force_retaddr
13704 ret;
13705
13706 .L__enc2_xor:
13707@@ -470,6 +475,7 @@ ENTRY(__camellia_enc_blk_2way)
13708
13709 movq RRBP, %rbp;
13710 popq %rbx;
13711+ pax_force_retaddr
13712 ret;
13713 ENDPROC(__camellia_enc_blk_2way)
13714
13715@@ -510,5 +516,6 @@ ENTRY(camellia_dec_blk_2way)
13716
13717 movq RRBP, %rbp;
13718 movq RXOR, %rbx;
13719+ pax_force_retaddr
13720 ret;
13721 ENDPROC(camellia_dec_blk_2way)
13722diff --git a/arch/x86/crypto/cast5-avx-x86_64-asm_64.S b/arch/x86/crypto/cast5-avx-x86_64-asm_64.S
13723index c35fd5d..2d8c7db 100644
13724--- a/arch/x86/crypto/cast5-avx-x86_64-asm_64.S
13725+++ b/arch/x86/crypto/cast5-avx-x86_64-asm_64.S
13726@@ -24,6 +24,7 @@
13727 */
13728
13729 #include <linux/linkage.h>
13730+#include <asm/alternative-asm.h>
13731
13732 .file "cast5-avx-x86_64-asm_64.S"
13733
13734@@ -281,6 +282,7 @@ __cast5_enc_blk16:
13735 outunpack_blocks(RR3, RL3, RTMP, RX, RKM);
13736 outunpack_blocks(RR4, RL4, RTMP, RX, RKM);
13737
13738+ pax_force_retaddr
13739 ret;
13740 ENDPROC(__cast5_enc_blk16)
13741
13742@@ -352,6 +354,7 @@ __cast5_dec_blk16:
13743 outunpack_blocks(RR3, RL3, RTMP, RX, RKM);
13744 outunpack_blocks(RR4, RL4, RTMP, RX, RKM);
13745
13746+ pax_force_retaddr
13747 ret;
13748
13749 .L__skip_dec:
13750@@ -388,6 +391,7 @@ ENTRY(cast5_ecb_enc_16way)
13751 vmovdqu RR4, (6*4*4)(%r11);
13752 vmovdqu RL4, (7*4*4)(%r11);
13753
13754+ pax_force_retaddr
13755 ret;
13756 ENDPROC(cast5_ecb_enc_16way)
13757
13758@@ -420,6 +424,7 @@ ENTRY(cast5_ecb_dec_16way)
13759 vmovdqu RR4, (6*4*4)(%r11);
13760 vmovdqu RL4, (7*4*4)(%r11);
13761
13762+ pax_force_retaddr
13763 ret;
13764 ENDPROC(cast5_ecb_dec_16way)
13765
13766@@ -430,10 +435,10 @@ ENTRY(cast5_cbc_dec_16way)
13767 * %rdx: src
13768 */
13769
13770- pushq %r12;
13771+ pushq %r14;
13772
13773 movq %rsi, %r11;
13774- movq %rdx, %r12;
13775+ movq %rdx, %r14;
13776
13777 vmovdqu (0*16)(%rdx), RL1;
13778 vmovdqu (1*16)(%rdx), RR1;
13779@@ -447,16 +452,16 @@ ENTRY(cast5_cbc_dec_16way)
13780 call __cast5_dec_blk16;
13781
13782 /* xor with src */
13783- vmovq (%r12), RX;
13784+ vmovq (%r14), RX;
13785 vpshufd $0x4f, RX, RX;
13786 vpxor RX, RR1, RR1;
13787- vpxor 0*16+8(%r12), RL1, RL1;
13788- vpxor 1*16+8(%r12), RR2, RR2;
13789- vpxor 2*16+8(%r12), RL2, RL2;
13790- vpxor 3*16+8(%r12), RR3, RR3;
13791- vpxor 4*16+8(%r12), RL3, RL3;
13792- vpxor 5*16+8(%r12), RR4, RR4;
13793- vpxor 6*16+8(%r12), RL4, RL4;
13794+ vpxor 0*16+8(%r14), RL1, RL1;
13795+ vpxor 1*16+8(%r14), RR2, RR2;
13796+ vpxor 2*16+8(%r14), RL2, RL2;
13797+ vpxor 3*16+8(%r14), RR3, RR3;
13798+ vpxor 4*16+8(%r14), RL3, RL3;
13799+ vpxor 5*16+8(%r14), RR4, RR4;
13800+ vpxor 6*16+8(%r14), RL4, RL4;
13801
13802 vmovdqu RR1, (0*16)(%r11);
13803 vmovdqu RL1, (1*16)(%r11);
13804@@ -467,8 +472,9 @@ ENTRY(cast5_cbc_dec_16way)
13805 vmovdqu RR4, (6*16)(%r11);
13806 vmovdqu RL4, (7*16)(%r11);
13807
13808- popq %r12;
13809+ popq %r14;
13810
13811+ pax_force_retaddr
13812 ret;
13813 ENDPROC(cast5_cbc_dec_16way)
13814
13815@@ -480,10 +486,10 @@ ENTRY(cast5_ctr_16way)
13816 * %rcx: iv (big endian, 64bit)
13817 */
13818
13819- pushq %r12;
13820+ pushq %r14;
13821
13822 movq %rsi, %r11;
13823- movq %rdx, %r12;
13824+ movq %rdx, %r14;
13825
13826 vpcmpeqd RTMP, RTMP, RTMP;
13827 vpsrldq $8, RTMP, RTMP; /* low: -1, high: 0 */
13828@@ -523,14 +529,14 @@ ENTRY(cast5_ctr_16way)
13829 call __cast5_enc_blk16;
13830
13831 /* dst = src ^ iv */
13832- vpxor (0*16)(%r12), RR1, RR1;
13833- vpxor (1*16)(%r12), RL1, RL1;
13834- vpxor (2*16)(%r12), RR2, RR2;
13835- vpxor (3*16)(%r12), RL2, RL2;
13836- vpxor (4*16)(%r12), RR3, RR3;
13837- vpxor (5*16)(%r12), RL3, RL3;
13838- vpxor (6*16)(%r12), RR4, RR4;
13839- vpxor (7*16)(%r12), RL4, RL4;
13840+ vpxor (0*16)(%r14), RR1, RR1;
13841+ vpxor (1*16)(%r14), RL1, RL1;
13842+ vpxor (2*16)(%r14), RR2, RR2;
13843+ vpxor (3*16)(%r14), RL2, RL2;
13844+ vpxor (4*16)(%r14), RR3, RR3;
13845+ vpxor (5*16)(%r14), RL3, RL3;
13846+ vpxor (6*16)(%r14), RR4, RR4;
13847+ vpxor (7*16)(%r14), RL4, RL4;
13848 vmovdqu RR1, (0*16)(%r11);
13849 vmovdqu RL1, (1*16)(%r11);
13850 vmovdqu RR2, (2*16)(%r11);
13851@@ -540,7 +546,8 @@ ENTRY(cast5_ctr_16way)
13852 vmovdqu RR4, (6*16)(%r11);
13853 vmovdqu RL4, (7*16)(%r11);
13854
13855- popq %r12;
13856+ popq %r14;
13857
13858+ pax_force_retaddr
13859 ret;
13860 ENDPROC(cast5_ctr_16way)
13861diff --git a/arch/x86/crypto/cast6-avx-x86_64-asm_64.S b/arch/x86/crypto/cast6-avx-x86_64-asm_64.S
13862index e3531f8..e123f35 100644
13863--- a/arch/x86/crypto/cast6-avx-x86_64-asm_64.S
13864+++ b/arch/x86/crypto/cast6-avx-x86_64-asm_64.S
13865@@ -24,6 +24,7 @@
13866 */
13867
13868 #include <linux/linkage.h>
13869+#include <asm/alternative-asm.h>
13870 #include "glue_helper-asm-avx.S"
13871
13872 .file "cast6-avx-x86_64-asm_64.S"
13873@@ -295,6 +296,7 @@ __cast6_enc_blk8:
13874 outunpack_blocks(RA1, RB1, RC1, RD1, RTMP, RX, RKRF, RKM);
13875 outunpack_blocks(RA2, RB2, RC2, RD2, RTMP, RX, RKRF, RKM);
13876
13877+ pax_force_retaddr
13878 ret;
13879 ENDPROC(__cast6_enc_blk8)
13880
13881@@ -340,6 +342,7 @@ __cast6_dec_blk8:
13882 outunpack_blocks(RA1, RB1, RC1, RD1, RTMP, RX, RKRF, RKM);
13883 outunpack_blocks(RA2, RB2, RC2, RD2, RTMP, RX, RKRF, RKM);
13884
13885+ pax_force_retaddr
13886 ret;
13887 ENDPROC(__cast6_dec_blk8)
13888
13889@@ -358,6 +361,7 @@ ENTRY(cast6_ecb_enc_8way)
13890
13891 store_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
13892
13893+ pax_force_retaddr
13894 ret;
13895 ENDPROC(cast6_ecb_enc_8way)
13896
13897@@ -376,6 +380,7 @@ ENTRY(cast6_ecb_dec_8way)
13898
13899 store_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
13900
13901+ pax_force_retaddr
13902 ret;
13903 ENDPROC(cast6_ecb_dec_8way)
13904
13905@@ -386,19 +391,20 @@ ENTRY(cast6_cbc_dec_8way)
13906 * %rdx: src
13907 */
13908
13909- pushq %r12;
13910+ pushq %r14;
13911
13912 movq %rsi, %r11;
13913- movq %rdx, %r12;
13914+ movq %rdx, %r14;
13915
13916 load_8way(%rdx, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
13917
13918 call __cast6_dec_blk8;
13919
13920- store_cbc_8way(%r12, %r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
13921+ store_cbc_8way(%r14, %r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
13922
13923- popq %r12;
13924+ popq %r14;
13925
13926+ pax_force_retaddr
13927 ret;
13928 ENDPROC(cast6_cbc_dec_8way)
13929
13930@@ -410,20 +416,21 @@ ENTRY(cast6_ctr_8way)
13931 * %rcx: iv (little endian, 128bit)
13932 */
13933
13934- pushq %r12;
13935+ pushq %r14;
13936
13937 movq %rsi, %r11;
13938- movq %rdx, %r12;
13939+ movq %rdx, %r14;
13940
13941 load_ctr_8way(%rcx, .Lbswap128_mask, RA1, RB1, RC1, RD1, RA2, RB2, RC2,
13942 RD2, RX, RKR, RKM);
13943
13944 call __cast6_enc_blk8;
13945
13946- store_ctr_8way(%r12, %r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
13947+ store_ctr_8way(%r14, %r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
13948
13949- popq %r12;
13950+ popq %r14;
13951
13952+ pax_force_retaddr
13953 ret;
13954 ENDPROC(cast6_ctr_8way)
13955
13956@@ -446,6 +453,7 @@ ENTRY(cast6_xts_enc_8way)
13957 /* dst <= regs xor IVs(in dst) */
13958 store_xts_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
13959
13960+ pax_force_retaddr
13961 ret;
13962 ENDPROC(cast6_xts_enc_8way)
13963
13964@@ -468,5 +476,6 @@ ENTRY(cast6_xts_dec_8way)
13965 /* dst <= regs xor IVs(in dst) */
13966 store_xts_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
13967
13968+ pax_force_retaddr
13969 ret;
13970 ENDPROC(cast6_xts_dec_8way)
13971diff --git a/arch/x86/crypto/crc32c-pcl-intel-asm_64.S b/arch/x86/crypto/crc32c-pcl-intel-asm_64.S
13972index 26d49eb..c0a8c84 100644
13973--- a/arch/x86/crypto/crc32c-pcl-intel-asm_64.S
13974+++ b/arch/x86/crypto/crc32c-pcl-intel-asm_64.S
13975@@ -45,6 +45,7 @@
13976
13977 #include <asm/inst.h>
13978 #include <linux/linkage.h>
13979+#include <asm/alternative-asm.h>
13980
13981 ## ISCSI CRC 32 Implementation with crc32 and pclmulqdq Instruction
13982
13983@@ -309,6 +310,7 @@ do_return:
13984 popq %rsi
13985 popq %rdi
13986 popq %rbx
13987+ pax_force_retaddr
13988 ret
13989
13990 ################################################################
13991diff --git a/arch/x86/crypto/ghash-clmulni-intel_asm.S b/arch/x86/crypto/ghash-clmulni-intel_asm.S
13992index 5d1e007..098cb4f 100644
13993--- a/arch/x86/crypto/ghash-clmulni-intel_asm.S
13994+++ b/arch/x86/crypto/ghash-clmulni-intel_asm.S
13995@@ -18,6 +18,7 @@
13996
13997 #include <linux/linkage.h>
13998 #include <asm/inst.h>
13999+#include <asm/alternative-asm.h>
14000
14001 .data
14002
14003@@ -89,6 +90,7 @@ __clmul_gf128mul_ble:
14004 psrlq $1, T2
14005 pxor T2, T1
14006 pxor T1, DATA
14007+ pax_force_retaddr
14008 ret
14009 ENDPROC(__clmul_gf128mul_ble)
14010
14011@@ -101,6 +103,7 @@ ENTRY(clmul_ghash_mul)
14012 call __clmul_gf128mul_ble
14013 PSHUFB_XMM BSWAP DATA
14014 movups DATA, (%rdi)
14015+ pax_force_retaddr
14016 ret
14017 ENDPROC(clmul_ghash_mul)
14018
14019@@ -128,5 +131,6 @@ ENTRY(clmul_ghash_update)
14020 PSHUFB_XMM BSWAP DATA
14021 movups DATA, (%rdi)
14022 .Lupdate_just_ret:
14023+ pax_force_retaddr
14024 ret
14025 ENDPROC(clmul_ghash_update)
14026diff --git a/arch/x86/crypto/salsa20-x86_64-asm_64.S b/arch/x86/crypto/salsa20-x86_64-asm_64.S
14027index 9279e0b..c4b3d2c 100644
14028--- a/arch/x86/crypto/salsa20-x86_64-asm_64.S
14029+++ b/arch/x86/crypto/salsa20-x86_64-asm_64.S
14030@@ -1,4 +1,5 @@
14031 #include <linux/linkage.h>
14032+#include <asm/alternative-asm.h>
14033
14034 # enter salsa20_encrypt_bytes
14035 ENTRY(salsa20_encrypt_bytes)
14036@@ -789,6 +790,7 @@ ENTRY(salsa20_encrypt_bytes)
14037 add %r11,%rsp
14038 mov %rdi,%rax
14039 mov %rsi,%rdx
14040+ pax_force_retaddr
14041 ret
14042 # bytesatleast65:
14043 ._bytesatleast65:
14044@@ -889,6 +891,7 @@ ENTRY(salsa20_keysetup)
14045 add %r11,%rsp
14046 mov %rdi,%rax
14047 mov %rsi,%rdx
14048+ pax_force_retaddr
14049 ret
14050 ENDPROC(salsa20_keysetup)
14051
14052@@ -914,5 +917,6 @@ ENTRY(salsa20_ivsetup)
14053 add %r11,%rsp
14054 mov %rdi,%rax
14055 mov %rsi,%rdx
14056+ pax_force_retaddr
14057 ret
14058 ENDPROC(salsa20_ivsetup)
14059diff --git a/arch/x86/crypto/serpent-avx-x86_64-asm_64.S b/arch/x86/crypto/serpent-avx-x86_64-asm_64.S
14060index 2f202f4..d9164d6 100644
14061--- a/arch/x86/crypto/serpent-avx-x86_64-asm_64.S
14062+++ b/arch/x86/crypto/serpent-avx-x86_64-asm_64.S
14063@@ -24,6 +24,7 @@
14064 */
14065
14066 #include <linux/linkage.h>
14067+#include <asm/alternative-asm.h>
14068 #include "glue_helper-asm-avx.S"
14069
14070 .file "serpent-avx-x86_64-asm_64.S"
14071@@ -618,6 +619,7 @@ __serpent_enc_blk8_avx:
14072 write_blocks(RA1, RB1, RC1, RD1, RK0, RK1, RK2);
14073 write_blocks(RA2, RB2, RC2, RD2, RK0, RK1, RK2);
14074
14075+ pax_force_retaddr
14076 ret;
14077 ENDPROC(__serpent_enc_blk8_avx)
14078
14079@@ -672,6 +674,7 @@ __serpent_dec_blk8_avx:
14080 write_blocks(RC1, RD1, RB1, RE1, RK0, RK1, RK2);
14081 write_blocks(RC2, RD2, RB2, RE2, RK0, RK1, RK2);
14082
14083+ pax_force_retaddr
14084 ret;
14085 ENDPROC(__serpent_dec_blk8_avx)
14086
14087@@ -688,6 +691,7 @@ ENTRY(serpent_ecb_enc_8way_avx)
14088
14089 store_8way(%rsi, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
14090
14091+ pax_force_retaddr
14092 ret;
14093 ENDPROC(serpent_ecb_enc_8way_avx)
14094
14095@@ -704,6 +708,7 @@ ENTRY(serpent_ecb_dec_8way_avx)
14096
14097 store_8way(%rsi, RC1, RD1, RB1, RE1, RC2, RD2, RB2, RE2);
14098
14099+ pax_force_retaddr
14100 ret;
14101 ENDPROC(serpent_ecb_dec_8way_avx)
14102
14103@@ -720,6 +725,7 @@ ENTRY(serpent_cbc_dec_8way_avx)
14104
14105 store_cbc_8way(%rdx, %rsi, RC1, RD1, RB1, RE1, RC2, RD2, RB2, RE2);
14106
14107+ pax_force_retaddr
14108 ret;
14109 ENDPROC(serpent_cbc_dec_8way_avx)
14110
14111@@ -738,6 +744,7 @@ ENTRY(serpent_ctr_8way_avx)
14112
14113 store_ctr_8way(%rdx, %rsi, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
14114
14115+ pax_force_retaddr
14116 ret;
14117 ENDPROC(serpent_ctr_8way_avx)
14118
14119@@ -758,6 +765,7 @@ ENTRY(serpent_xts_enc_8way_avx)
14120 /* dst <= regs xor IVs(in dst) */
14121 store_xts_8way(%rsi, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
14122
14123+ pax_force_retaddr
14124 ret;
14125 ENDPROC(serpent_xts_enc_8way_avx)
14126
14127@@ -778,5 +786,6 @@ ENTRY(serpent_xts_dec_8way_avx)
14128 /* dst <= regs xor IVs(in dst) */
14129 store_xts_8way(%rsi, RC1, RD1, RB1, RE1, RC2, RD2, RB2, RE2);
14130
14131+ pax_force_retaddr
14132 ret;
14133 ENDPROC(serpent_xts_dec_8way_avx)
14134diff --git a/arch/x86/crypto/serpent-avx2-asm_64.S b/arch/x86/crypto/serpent-avx2-asm_64.S
14135index b222085..abd483c 100644
14136--- a/arch/x86/crypto/serpent-avx2-asm_64.S
14137+++ b/arch/x86/crypto/serpent-avx2-asm_64.S
14138@@ -15,6 +15,7 @@
14139 */
14140
14141 #include <linux/linkage.h>
14142+#include <asm/alternative-asm.h>
14143 #include "glue_helper-asm-avx2.S"
14144
14145 .file "serpent-avx2-asm_64.S"
14146@@ -610,6 +611,7 @@ __serpent_enc_blk16:
14147 write_blocks(RA1, RB1, RC1, RD1, RK0, RK1, RK2);
14148 write_blocks(RA2, RB2, RC2, RD2, RK0, RK1, RK2);
14149
14150+ pax_force_retaddr
14151 ret;
14152 ENDPROC(__serpent_enc_blk16)
14153
14154@@ -664,6 +666,7 @@ __serpent_dec_blk16:
14155 write_blocks(RC1, RD1, RB1, RE1, RK0, RK1, RK2);
14156 write_blocks(RC2, RD2, RB2, RE2, RK0, RK1, RK2);
14157
14158+ pax_force_retaddr
14159 ret;
14160 ENDPROC(__serpent_dec_blk16)
14161
14162@@ -684,6 +687,7 @@ ENTRY(serpent_ecb_enc_16way)
14163
14164 vzeroupper;
14165
14166+ pax_force_retaddr
14167 ret;
14168 ENDPROC(serpent_ecb_enc_16way)
14169
14170@@ -704,6 +708,7 @@ ENTRY(serpent_ecb_dec_16way)
14171
14172 vzeroupper;
14173
14174+ pax_force_retaddr
14175 ret;
14176 ENDPROC(serpent_ecb_dec_16way)
14177
14178@@ -725,6 +730,7 @@ ENTRY(serpent_cbc_dec_16way)
14179
14180 vzeroupper;
14181
14182+ pax_force_retaddr
14183 ret;
14184 ENDPROC(serpent_cbc_dec_16way)
14185
14186@@ -748,6 +754,7 @@ ENTRY(serpent_ctr_16way)
14187
14188 vzeroupper;
14189
14190+ pax_force_retaddr
14191 ret;
14192 ENDPROC(serpent_ctr_16way)
14193
14194@@ -772,6 +779,7 @@ ENTRY(serpent_xts_enc_16way)
14195
14196 vzeroupper;
14197
14198+ pax_force_retaddr
14199 ret;
14200 ENDPROC(serpent_xts_enc_16way)
14201
14202@@ -796,5 +804,6 @@ ENTRY(serpent_xts_dec_16way)
14203
14204 vzeroupper;
14205
14206+ pax_force_retaddr
14207 ret;
14208 ENDPROC(serpent_xts_dec_16way)
14209diff --git a/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S b/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S
14210index acc066c..1559cc4 100644
14211--- a/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S
14212+++ b/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S
14213@@ -25,6 +25,7 @@
14214 */
14215
14216 #include <linux/linkage.h>
14217+#include <asm/alternative-asm.h>
14218
14219 .file "serpent-sse2-x86_64-asm_64.S"
14220 .text
14221@@ -690,12 +691,14 @@ ENTRY(__serpent_enc_blk_8way)
14222 write_blocks(%rsi, RA1, RB1, RC1, RD1, RK0, RK1, RK2);
14223 write_blocks(%rax, RA2, RB2, RC2, RD2, RK0, RK1, RK2);
14224
14225+ pax_force_retaddr
14226 ret;
14227
14228 .L__enc_xor8:
14229 xor_blocks(%rsi, RA1, RB1, RC1, RD1, RK0, RK1, RK2);
14230 xor_blocks(%rax, RA2, RB2, RC2, RD2, RK0, RK1, RK2);
14231
14232+ pax_force_retaddr
14233 ret;
14234 ENDPROC(__serpent_enc_blk_8way)
14235
14236@@ -750,5 +753,6 @@ ENTRY(serpent_dec_blk_8way)
14237 write_blocks(%rsi, RC1, RD1, RB1, RE1, RK0, RK1, RK2);
14238 write_blocks(%rax, RC2, RD2, RB2, RE2, RK0, RK1, RK2);
14239
14240+ pax_force_retaddr
14241 ret;
14242 ENDPROC(serpent_dec_blk_8way)
14243diff --git a/arch/x86/crypto/sha1_ssse3_asm.S b/arch/x86/crypto/sha1_ssse3_asm.S
14244index a410950..9dfe7ad 100644
14245--- a/arch/x86/crypto/sha1_ssse3_asm.S
14246+++ b/arch/x86/crypto/sha1_ssse3_asm.S
14247@@ -29,6 +29,7 @@
14248 */
14249
14250 #include <linux/linkage.h>
14251+#include <asm/alternative-asm.h>
14252
14253 #define CTX %rdi // arg1
14254 #define BUF %rsi // arg2
14255@@ -75,9 +76,9 @@
14256
14257 push %rbx
14258 push %rbp
14259- push %r12
14260+ push %r14
14261
14262- mov %rsp, %r12
14263+ mov %rsp, %r14
14264 sub $64, %rsp # allocate workspace
14265 and $~15, %rsp # align stack
14266
14267@@ -99,11 +100,12 @@
14268 xor %rax, %rax
14269 rep stosq
14270
14271- mov %r12, %rsp # deallocate workspace
14272+ mov %r14, %rsp # deallocate workspace
14273
14274- pop %r12
14275+ pop %r14
14276 pop %rbp
14277 pop %rbx
14278+ pax_force_retaddr
14279 ret
14280
14281 ENDPROC(\name)
14282diff --git a/arch/x86/crypto/sha256-avx-asm.S b/arch/x86/crypto/sha256-avx-asm.S
14283index 642f156..51a513c 100644
14284--- a/arch/x86/crypto/sha256-avx-asm.S
14285+++ b/arch/x86/crypto/sha256-avx-asm.S
14286@@ -49,6 +49,7 @@
14287
14288 #ifdef CONFIG_AS_AVX
14289 #include <linux/linkage.h>
14290+#include <asm/alternative-asm.h>
14291
14292 ## assume buffers not aligned
14293 #define VMOVDQ vmovdqu
14294@@ -460,6 +461,7 @@ done_hash:
14295 popq %r13
14296 popq %rbp
14297 popq %rbx
14298+ pax_force_retaddr
14299 ret
14300 ENDPROC(sha256_transform_avx)
14301
14302diff --git a/arch/x86/crypto/sha256-avx2-asm.S b/arch/x86/crypto/sha256-avx2-asm.S
14303index 9e86944..3795e6a 100644
14304--- a/arch/x86/crypto/sha256-avx2-asm.S
14305+++ b/arch/x86/crypto/sha256-avx2-asm.S
14306@@ -50,6 +50,7 @@
14307
14308 #ifdef CONFIG_AS_AVX2
14309 #include <linux/linkage.h>
14310+#include <asm/alternative-asm.h>
14311
14312 ## assume buffers not aligned
14313 #define VMOVDQ vmovdqu
14314@@ -720,6 +721,7 @@ done_hash:
14315 popq %r12
14316 popq %rbp
14317 popq %rbx
14318+ pax_force_retaddr
14319 ret
14320 ENDPROC(sha256_transform_rorx)
14321
14322diff --git a/arch/x86/crypto/sha256-ssse3-asm.S b/arch/x86/crypto/sha256-ssse3-asm.S
14323index f833b74..8c62a9e 100644
14324--- a/arch/x86/crypto/sha256-ssse3-asm.S
14325+++ b/arch/x86/crypto/sha256-ssse3-asm.S
14326@@ -47,6 +47,7 @@
14327 ########################################################################
14328
14329 #include <linux/linkage.h>
14330+#include <asm/alternative-asm.h>
14331
14332 ## assume buffers not aligned
14333 #define MOVDQ movdqu
14334@@ -471,6 +472,7 @@ done_hash:
14335 popq %rbp
14336 popq %rbx
14337
14338+ pax_force_retaddr
14339 ret
14340 ENDPROC(sha256_transform_ssse3)
14341
14342diff --git a/arch/x86/crypto/sha512-avx-asm.S b/arch/x86/crypto/sha512-avx-asm.S
14343index 974dde9..a823ff9 100644
14344--- a/arch/x86/crypto/sha512-avx-asm.S
14345+++ b/arch/x86/crypto/sha512-avx-asm.S
14346@@ -49,6 +49,7 @@
14347
14348 #ifdef CONFIG_AS_AVX
14349 #include <linux/linkage.h>
14350+#include <asm/alternative-asm.h>
14351
14352 .text
14353
14354@@ -364,6 +365,7 @@ updateblock:
14355 mov frame_RSPSAVE(%rsp), %rsp
14356
14357 nowork:
14358+ pax_force_retaddr
14359 ret
14360 ENDPROC(sha512_transform_avx)
14361
14362diff --git a/arch/x86/crypto/sha512-avx2-asm.S b/arch/x86/crypto/sha512-avx2-asm.S
14363index 568b961..ed20c37 100644
14364--- a/arch/x86/crypto/sha512-avx2-asm.S
14365+++ b/arch/x86/crypto/sha512-avx2-asm.S
14366@@ -51,6 +51,7 @@
14367
14368 #ifdef CONFIG_AS_AVX2
14369 #include <linux/linkage.h>
14370+#include <asm/alternative-asm.h>
14371
14372 .text
14373
14374@@ -678,6 +679,7 @@ done_hash:
14375
14376 # Restore Stack Pointer
14377 mov frame_RSPSAVE(%rsp), %rsp
14378+ pax_force_retaddr
14379 ret
14380 ENDPROC(sha512_transform_rorx)
14381
14382diff --git a/arch/x86/crypto/sha512-ssse3-asm.S b/arch/x86/crypto/sha512-ssse3-asm.S
14383index fb56855..6edd768 100644
14384--- a/arch/x86/crypto/sha512-ssse3-asm.S
14385+++ b/arch/x86/crypto/sha512-ssse3-asm.S
14386@@ -48,6 +48,7 @@
14387 ########################################################################
14388
14389 #include <linux/linkage.h>
14390+#include <asm/alternative-asm.h>
14391
14392 .text
14393
14394@@ -363,6 +364,7 @@ updateblock:
14395 mov frame_RSPSAVE(%rsp), %rsp
14396
14397 nowork:
14398+ pax_force_retaddr
14399 ret
14400 ENDPROC(sha512_transform_ssse3)
14401
14402diff --git a/arch/x86/crypto/twofish-avx-x86_64-asm_64.S b/arch/x86/crypto/twofish-avx-x86_64-asm_64.S
14403index 0505813..b067311 100644
14404--- a/arch/x86/crypto/twofish-avx-x86_64-asm_64.S
14405+++ b/arch/x86/crypto/twofish-avx-x86_64-asm_64.S
14406@@ -24,6 +24,7 @@
14407 */
14408
14409 #include <linux/linkage.h>
14410+#include <asm/alternative-asm.h>
14411 #include "glue_helper-asm-avx.S"
14412
14413 .file "twofish-avx-x86_64-asm_64.S"
14414@@ -284,6 +285,7 @@ __twofish_enc_blk8:
14415 outunpack_blocks(RC1, RD1, RA1, RB1, RK1, RX0, RY0, RK2);
14416 outunpack_blocks(RC2, RD2, RA2, RB2, RK1, RX0, RY0, RK2);
14417
14418+ pax_force_retaddr
14419 ret;
14420 ENDPROC(__twofish_enc_blk8)
14421
14422@@ -324,6 +326,7 @@ __twofish_dec_blk8:
14423 outunpack_blocks(RA1, RB1, RC1, RD1, RK1, RX0, RY0, RK2);
14424 outunpack_blocks(RA2, RB2, RC2, RD2, RK1, RX0, RY0, RK2);
14425
14426+ pax_force_retaddr
14427 ret;
14428 ENDPROC(__twofish_dec_blk8)
14429
14430@@ -342,6 +345,7 @@ ENTRY(twofish_ecb_enc_8way)
14431
14432 store_8way(%r11, RC1, RD1, RA1, RB1, RC2, RD2, RA2, RB2);
14433
14434+ pax_force_retaddr
14435 ret;
14436 ENDPROC(twofish_ecb_enc_8way)
14437
14438@@ -360,6 +364,7 @@ ENTRY(twofish_ecb_dec_8way)
14439
14440 store_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
14441
14442+ pax_force_retaddr
14443 ret;
14444 ENDPROC(twofish_ecb_dec_8way)
14445
14446@@ -370,19 +375,20 @@ ENTRY(twofish_cbc_dec_8way)
14447 * %rdx: src
14448 */
14449
14450- pushq %r12;
14451+ pushq %r14;
14452
14453 movq %rsi, %r11;
14454- movq %rdx, %r12;
14455+ movq %rdx, %r14;
14456
14457 load_8way(%rdx, RC1, RD1, RA1, RB1, RC2, RD2, RA2, RB2);
14458
14459 call __twofish_dec_blk8;
14460
14461- store_cbc_8way(%r12, %r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
14462+ store_cbc_8way(%r14, %r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
14463
14464- popq %r12;
14465+ popq %r14;
14466
14467+ pax_force_retaddr
14468 ret;
14469 ENDPROC(twofish_cbc_dec_8way)
14470
14471@@ -394,20 +400,21 @@ ENTRY(twofish_ctr_8way)
14472 * %rcx: iv (little endian, 128bit)
14473 */
14474
14475- pushq %r12;
14476+ pushq %r14;
14477
14478 movq %rsi, %r11;
14479- movq %rdx, %r12;
14480+ movq %rdx, %r14;
14481
14482 load_ctr_8way(%rcx, .Lbswap128_mask, RA1, RB1, RC1, RD1, RA2, RB2, RC2,
14483 RD2, RX0, RX1, RY0);
14484
14485 call __twofish_enc_blk8;
14486
14487- store_ctr_8way(%r12, %r11, RC1, RD1, RA1, RB1, RC2, RD2, RA2, RB2);
14488+ store_ctr_8way(%r14, %r11, RC1, RD1, RA1, RB1, RC2, RD2, RA2, RB2);
14489
14490- popq %r12;
14491+ popq %r14;
14492
14493+ pax_force_retaddr
14494 ret;
14495 ENDPROC(twofish_ctr_8way)
14496
14497@@ -430,6 +437,7 @@ ENTRY(twofish_xts_enc_8way)
14498 /* dst <= regs xor IVs(in dst) */
14499 store_xts_8way(%r11, RC1, RD1, RA1, RB1, RC2, RD2, RA2, RB2);
14500
14501+ pax_force_retaddr
14502 ret;
14503 ENDPROC(twofish_xts_enc_8way)
14504
14505@@ -452,5 +460,6 @@ ENTRY(twofish_xts_dec_8way)
14506 /* dst <= regs xor IVs(in dst) */
14507 store_xts_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
14508
14509+ pax_force_retaddr
14510 ret;
14511 ENDPROC(twofish_xts_dec_8way)
14512diff --git a/arch/x86/crypto/twofish-x86_64-asm_64-3way.S b/arch/x86/crypto/twofish-x86_64-asm_64-3way.S
14513index 1c3b7ce..02f578d 100644
14514--- a/arch/x86/crypto/twofish-x86_64-asm_64-3way.S
14515+++ b/arch/x86/crypto/twofish-x86_64-asm_64-3way.S
14516@@ -21,6 +21,7 @@
14517 */
14518
14519 #include <linux/linkage.h>
14520+#include <asm/alternative-asm.h>
14521
14522 .file "twofish-x86_64-asm-3way.S"
14523 .text
14524@@ -258,6 +259,7 @@ ENTRY(__twofish_enc_blk_3way)
14525 popq %r13;
14526 popq %r14;
14527 popq %r15;
14528+ pax_force_retaddr
14529 ret;
14530
14531 .L__enc_xor3:
14532@@ -269,6 +271,7 @@ ENTRY(__twofish_enc_blk_3way)
14533 popq %r13;
14534 popq %r14;
14535 popq %r15;
14536+ pax_force_retaddr
14537 ret;
14538 ENDPROC(__twofish_enc_blk_3way)
14539
14540@@ -308,5 +311,6 @@ ENTRY(twofish_dec_blk_3way)
14541 popq %r13;
14542 popq %r14;
14543 popq %r15;
14544+ pax_force_retaddr
14545 ret;
14546 ENDPROC(twofish_dec_blk_3way)
14547diff --git a/arch/x86/crypto/twofish-x86_64-asm_64.S b/arch/x86/crypto/twofish-x86_64-asm_64.S
14548index a039d21..524b8b2 100644
14549--- a/arch/x86/crypto/twofish-x86_64-asm_64.S
14550+++ b/arch/x86/crypto/twofish-x86_64-asm_64.S
14551@@ -22,6 +22,7 @@
14552
14553 #include <linux/linkage.h>
14554 #include <asm/asm-offsets.h>
14555+#include <asm/alternative-asm.h>
14556
14557 #define a_offset 0
14558 #define b_offset 4
14559@@ -265,6 +266,7 @@ ENTRY(twofish_enc_blk)
14560
14561 popq R1
14562 movq $1,%rax
14563+ pax_force_retaddr
14564 ret
14565 ENDPROC(twofish_enc_blk)
14566
14567@@ -317,5 +319,6 @@ ENTRY(twofish_dec_blk)
14568
14569 popq R1
14570 movq $1,%rax
14571+ pax_force_retaddr
14572 ret
14573 ENDPROC(twofish_dec_blk)
14574diff --git a/arch/x86/ia32/ia32_aout.c b/arch/x86/ia32/ia32_aout.c
14575index ae6aad1..719d6d9 100644
14576--- a/arch/x86/ia32/ia32_aout.c
14577+++ b/arch/x86/ia32/ia32_aout.c
14578@@ -153,6 +153,8 @@ static int aout_core_dump(struct coredump_params *cprm)
14579 unsigned long dump_start, dump_size;
14580 struct user32 dump;
14581
14582+ memset(&dump, 0, sizeof(dump));
14583+
14584 fs = get_fs();
14585 set_fs(KERNEL_DS);
14586 has_dumped = 1;
14587diff --git a/arch/x86/ia32/ia32_signal.c b/arch/x86/ia32/ia32_signal.c
14588index d0165c9..0d5639b 100644
14589--- a/arch/x86/ia32/ia32_signal.c
14590+++ b/arch/x86/ia32/ia32_signal.c
14591@@ -218,7 +218,7 @@ asmlinkage long sys32_sigreturn(void)
14592 if (__get_user(set.sig[0], &frame->sc.oldmask)
14593 || (_COMPAT_NSIG_WORDS > 1
14594 && __copy_from_user((((char *) &set.sig) + 4),
14595- &frame->extramask,
14596+ frame->extramask,
14597 sizeof(frame->extramask))))
14598 goto badframe;
14599
14600@@ -338,7 +338,7 @@ static void __user *get_sigframe(struct ksignal *ksig, struct pt_regs *regs,
14601 sp -= frame_size;
14602 /* Align the stack pointer according to the i386 ABI,
14603 * i.e. so that on function entry ((sp + 4) & 15) == 0. */
14604- sp = ((sp + 4) & -16ul) - 4;
14605+ sp = ((sp - 12) & -16ul) - 4;
14606 return (void __user *) sp;
14607 }
14608
14609@@ -383,10 +383,10 @@ int ia32_setup_frame(int sig, struct ksignal *ksig,
14610 } else {
14611 /* Return stub is in 32bit vsyscall page */
14612 if (current->mm->context.vdso)
14613- restorer = current->mm->context.vdso +
14614- selected_vdso32->sym___kernel_sigreturn;
14615+ restorer = (void __force_user *)(current->mm->context.vdso +
14616+ selected_vdso32->sym___kernel_sigreturn);
14617 else
14618- restorer = &frame->retcode;
14619+ restorer = frame->retcode;
14620 }
14621
14622 put_user_try {
14623@@ -396,7 +396,7 @@ int ia32_setup_frame(int sig, struct ksignal *ksig,
14624 * These are actually not used anymore, but left because some
14625 * gdb versions depend on them as a marker.
14626 */
14627- put_user_ex(*((u64 *)&code), (u64 __user *)frame->retcode);
14628+ put_user_ex(*((const u64 *)&code), (u64 __user *)frame->retcode);
14629 } put_user_catch(err);
14630
14631 if (err)
14632@@ -438,7 +438,7 @@ int ia32_setup_rt_frame(int sig, struct ksignal *ksig,
14633 0xb8,
14634 __NR_ia32_rt_sigreturn,
14635 0x80cd,
14636- 0,
14637+ 0
14638 };
14639
14640 frame = get_sigframe(ksig, regs, sizeof(*frame), &fpstate);
14641@@ -461,16 +461,19 @@ int ia32_setup_rt_frame(int sig, struct ksignal *ksig,
14642
14643 if (ksig->ka.sa.sa_flags & SA_RESTORER)
14644 restorer = ksig->ka.sa.sa_restorer;
14645+ else if (current->mm->context.vdso)
14646+ /* Return stub is in 32bit vsyscall page */
14647+ restorer = (void __force_user *)(current->mm->context.vdso +
14648+ selected_vdso32->sym___kernel_rt_sigreturn);
14649 else
14650- restorer = current->mm->context.vdso +
14651- selected_vdso32->sym___kernel_rt_sigreturn;
14652+ restorer = frame->retcode;
14653 put_user_ex(ptr_to_compat(restorer), &frame->pretcode);
14654
14655 /*
14656 * Not actually used anymore, but left because some gdb
14657 * versions need it.
14658 */
14659- put_user_ex(*((u64 *)&code), (u64 __user *)frame->retcode);
14660+ put_user_ex(*((const u64 *)&code), (u64 __user *)frame->retcode);
14661 } put_user_catch(err);
14662
14663 err |= copy_siginfo_to_user32(&frame->info, &ksig->info);
14664diff --git a/arch/x86/ia32/ia32entry.S b/arch/x86/ia32/ia32entry.S
14665index 156ebca..9591cf0 100644
14666--- a/arch/x86/ia32/ia32entry.S
14667+++ b/arch/x86/ia32/ia32entry.S
14668@@ -15,8 +15,10 @@
14669 #include <asm/irqflags.h>
14670 #include <asm/asm.h>
14671 #include <asm/smap.h>
14672+#include <asm/pgtable.h>
14673 #include <linux/linkage.h>
14674 #include <linux/err.h>
14675+#include <asm/alternative-asm.h>
14676
14677 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
14678 #include <linux/elf-em.h>
14679@@ -62,12 +64,12 @@
14680 */
14681 .macro LOAD_ARGS32 offset, _r9=0
14682 .if \_r9
14683- movl \offset+16(%rsp),%r9d
14684+ movl \offset+R9(%rsp),%r9d
14685 .endif
14686- movl \offset+40(%rsp),%ecx
14687- movl \offset+48(%rsp),%edx
14688- movl \offset+56(%rsp),%esi
14689- movl \offset+64(%rsp),%edi
14690+ movl \offset+RCX(%rsp),%ecx
14691+ movl \offset+RDX(%rsp),%edx
14692+ movl \offset+RSI(%rsp),%esi
14693+ movl \offset+RDI(%rsp),%edi
14694 movl %eax,%eax /* zero extension */
14695 .endm
14696
14697@@ -96,6 +98,32 @@ ENTRY(native_irq_enable_sysexit)
14698 ENDPROC(native_irq_enable_sysexit)
14699 #endif
14700
14701+ .macro pax_enter_kernel_user
14702+ pax_set_fptr_mask
14703+#ifdef CONFIG_PAX_MEMORY_UDEREF
14704+ call pax_enter_kernel_user
14705+#endif
14706+ .endm
14707+
14708+ .macro pax_exit_kernel_user
14709+#ifdef CONFIG_PAX_MEMORY_UDEREF
14710+ call pax_exit_kernel_user
14711+#endif
14712+#ifdef CONFIG_PAX_RANDKSTACK
14713+ pushq %rax
14714+ pushq %r11
14715+ call pax_randomize_kstack
14716+ popq %r11
14717+ popq %rax
14718+#endif
14719+ .endm
14720+
14721+ .macro pax_erase_kstack
14722+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
14723+ call pax_erase_kstack
14724+#endif
14725+ .endm
14726+
14727 /*
14728 * 32bit SYSENTER instruction entry.
14729 *
14730@@ -122,12 +150,6 @@ ENTRY(ia32_sysenter_target)
14731 CFI_REGISTER rsp,rbp
14732 SWAPGS_UNSAFE_STACK
14733 movq PER_CPU_VAR(kernel_stack), %rsp
14734- addq $(KERNEL_STACK_OFFSET),%rsp
14735- /*
14736- * No need to follow this irqs on/off section: the syscall
14737- * disabled irqs, here we enable it straight after entry:
14738- */
14739- ENABLE_INTERRUPTS(CLBR_NONE)
14740 movl %ebp,%ebp /* zero extension */
14741 pushq_cfi $__USER32_DS
14742 /*CFI_REL_OFFSET ss,0*/
14743@@ -135,23 +157,46 @@ ENTRY(ia32_sysenter_target)
14744 CFI_REL_OFFSET rsp,0
14745 pushfq_cfi
14746 /*CFI_REL_OFFSET rflags,0*/
14747- movl TI_sysenter_return+THREAD_INFO(%rsp,3*8-KERNEL_STACK_OFFSET),%r10d
14748- CFI_REGISTER rip,r10
14749+ orl $X86_EFLAGS_IF,(%rsp)
14750+ GET_THREAD_INFO(%r11)
14751+ movl TI_sysenter_return(%r11), %r11d
14752+ CFI_REGISTER rip,r11
14753 pushq_cfi $__USER32_CS
14754 /*CFI_REL_OFFSET cs,0*/
14755 movl %eax, %eax
14756- pushq_cfi %r10
14757+ pushq_cfi %r11
14758 CFI_REL_OFFSET rip,0
14759 pushq_cfi %rax
14760 cld
14761 SAVE_ARGS 0,1,0
14762+ pax_enter_kernel_user
14763+
14764+#ifdef CONFIG_PAX_RANDKSTACK
14765+ pax_erase_kstack
14766+#endif
14767+
14768+ /*
14769+ * No need to follow this irqs on/off section: the syscall
14770+ * disabled irqs, here we enable it straight after entry:
14771+ */
14772+ ENABLE_INTERRUPTS(CLBR_NONE)
14773 /* no need to do an access_ok check here because rbp has been
14774 32bit zero extended */
14775+
14776+#ifdef CONFIG_PAX_MEMORY_UDEREF
14777+ addq pax_user_shadow_base,%rbp
14778+ ASM_PAX_OPEN_USERLAND
14779+#endif
14780+
14781 ASM_STAC
14782 1: movl (%rbp),%ebp
14783 _ASM_EXTABLE(1b,ia32_badarg)
14784 ASM_CLAC
14785
14786+#ifdef CONFIG_PAX_MEMORY_UDEREF
14787+ ASM_PAX_CLOSE_USERLAND
14788+#endif
14789+
14790 /*
14791 * Sysenter doesn't filter flags, so we need to clear NT
14792 * ourselves. To save a few cycles, we can check whether
14793@@ -161,8 +206,9 @@ ENTRY(ia32_sysenter_target)
14794 jnz sysenter_fix_flags
14795 sysenter_flags_fixed:
14796
14797- orl $TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
14798- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
14799+ GET_THREAD_INFO(%r11)
14800+ orl $TS_COMPAT,TI_status(%r11)
14801+ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
14802 CFI_REMEMBER_STATE
14803 jnz sysenter_tracesys
14804 cmpq $(IA32_NR_syscalls-1),%rax
14805@@ -172,14 +218,17 @@ sysenter_do_call:
14806 sysenter_dispatch:
14807 call *ia32_sys_call_table(,%rax,8)
14808 movq %rax,RAX-ARGOFFSET(%rsp)
14809+ GET_THREAD_INFO(%r11)
14810 DISABLE_INTERRUPTS(CLBR_NONE)
14811 TRACE_IRQS_OFF
14812- testl $_TIF_ALLWORK_MASK,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
14813+ testl $_TIF_ALLWORK_MASK,TI_flags(%r11)
14814 jnz sysexit_audit
14815 sysexit_from_sys_call:
14816- andl $~TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
14817+ pax_exit_kernel_user
14818+ pax_erase_kstack
14819+ andl $~TS_COMPAT,TI_status(%r11)
14820 /* clear IF, that popfq doesn't enable interrupts early */
14821- andl $~0x200,EFLAGS-ARGOFFSET(%rsp)
14822+ andl $~X86_EFLAGS_IF,EFLAGS-ARGOFFSET(%rsp)
14823 movl RIP-ARGOFFSET(%rsp),%edx /* User %eip */
14824 CFI_REGISTER rip,rdx
14825 RESTORE_ARGS 0,24,0,0,0,0
14826@@ -205,6 +254,9 @@ sysexit_from_sys_call:
14827 movl %ebx,%esi /* 2nd arg: 1st syscall arg */
14828 movl %eax,%edi /* 1st arg: syscall number */
14829 call __audit_syscall_entry
14830+
14831+ pax_erase_kstack
14832+
14833 movl RAX-ARGOFFSET(%rsp),%eax /* reload syscall number */
14834 cmpq $(IA32_NR_syscalls-1),%rax
14835 ja ia32_badsys
14836@@ -216,7 +268,7 @@ sysexit_from_sys_call:
14837 .endm
14838
14839 .macro auditsys_exit exit
14840- testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
14841+ testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
14842 jnz ia32_ret_from_sys_call
14843 TRACE_IRQS_ON
14844 ENABLE_INTERRUPTS(CLBR_NONE)
14845@@ -227,11 +279,12 @@ sysexit_from_sys_call:
14846 1: setbe %al /* 1 if error, 0 if not */
14847 movzbl %al,%edi /* zero-extend that into %edi */
14848 call __audit_syscall_exit
14849+ GET_THREAD_INFO(%r11)
14850 movq RAX-ARGOFFSET(%rsp),%rax /* reload syscall return value */
14851 movl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),%edi
14852 DISABLE_INTERRUPTS(CLBR_NONE)
14853 TRACE_IRQS_OFF
14854- testl %edi,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
14855+ testl %edi,TI_flags(%r11)
14856 jz \exit
14857 CLEAR_RREGS -ARGOFFSET
14858 jmp int_with_check
14859@@ -253,7 +306,7 @@ sysenter_fix_flags:
14860
14861 sysenter_tracesys:
14862 #ifdef CONFIG_AUDITSYSCALL
14863- testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
14864+ testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
14865 jz sysenter_auditsys
14866 #endif
14867 SAVE_REST
14868@@ -265,6 +318,9 @@ sysenter_tracesys:
14869 RESTORE_REST
14870 cmpq $(IA32_NR_syscalls-1),%rax
14871 ja int_ret_from_sys_call /* sysenter_tracesys has set RAX(%rsp) */
14872+
14873+ pax_erase_kstack
14874+
14875 jmp sysenter_do_call
14876 CFI_ENDPROC
14877 ENDPROC(ia32_sysenter_target)
14878@@ -292,19 +348,25 @@ ENDPROC(ia32_sysenter_target)
14879 ENTRY(ia32_cstar_target)
14880 CFI_STARTPROC32 simple
14881 CFI_SIGNAL_FRAME
14882- CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
14883+ CFI_DEF_CFA rsp,0
14884 CFI_REGISTER rip,rcx
14885 /*CFI_REGISTER rflags,r11*/
14886 SWAPGS_UNSAFE_STACK
14887 movl %esp,%r8d
14888 CFI_REGISTER rsp,r8
14889 movq PER_CPU_VAR(kernel_stack),%rsp
14890+ SAVE_ARGS 8*6,0,0
14891+ pax_enter_kernel_user
14892+
14893+#ifdef CONFIG_PAX_RANDKSTACK
14894+ pax_erase_kstack
14895+#endif
14896+
14897 /*
14898 * No need to follow this irqs on/off section: the syscall
14899 * disabled irqs and here we enable it straight after entry:
14900 */
14901 ENABLE_INTERRUPTS(CLBR_NONE)
14902- SAVE_ARGS 8,0,0
14903 movl %eax,%eax /* zero extension */
14904 movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
14905 movq %rcx,RIP-ARGOFFSET(%rsp)
14906@@ -320,12 +382,25 @@ ENTRY(ia32_cstar_target)
14907 /* no need to do an access_ok check here because r8 has been
14908 32bit zero extended */
14909 /* hardware stack frame is complete now */
14910+
14911+#ifdef CONFIG_PAX_MEMORY_UDEREF
14912+ ASM_PAX_OPEN_USERLAND
14913+ movq pax_user_shadow_base,%r8
14914+ addq RSP-ARGOFFSET(%rsp),%r8
14915+#endif
14916+
14917 ASM_STAC
14918 1: movl (%r8),%r9d
14919 _ASM_EXTABLE(1b,ia32_badarg)
14920 ASM_CLAC
14921- orl $TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
14922- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
14923+
14924+#ifdef CONFIG_PAX_MEMORY_UDEREF
14925+ ASM_PAX_CLOSE_USERLAND
14926+#endif
14927+
14928+ GET_THREAD_INFO(%r11)
14929+ orl $TS_COMPAT,TI_status(%r11)
14930+ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
14931 CFI_REMEMBER_STATE
14932 jnz cstar_tracesys
14933 cmpq $IA32_NR_syscalls-1,%rax
14934@@ -335,13 +410,16 @@ cstar_do_call:
14935 cstar_dispatch:
14936 call *ia32_sys_call_table(,%rax,8)
14937 movq %rax,RAX-ARGOFFSET(%rsp)
14938+ GET_THREAD_INFO(%r11)
14939 DISABLE_INTERRUPTS(CLBR_NONE)
14940 TRACE_IRQS_OFF
14941- testl $_TIF_ALLWORK_MASK,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
14942+ testl $_TIF_ALLWORK_MASK,TI_flags(%r11)
14943 jnz sysretl_audit
14944 sysretl_from_sys_call:
14945- andl $~TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
14946- RESTORE_ARGS 0,-ARG_SKIP,0,0,0
14947+ pax_exit_kernel_user
14948+ pax_erase_kstack
14949+ andl $~TS_COMPAT,TI_status(%r11)
14950+ RESTORE_ARGS 0,-ORIG_RAX,0,0,0
14951 movl RIP-ARGOFFSET(%rsp),%ecx
14952 CFI_REGISTER rip,rcx
14953 movl EFLAGS-ARGOFFSET(%rsp),%r11d
14954@@ -368,7 +446,7 @@ sysretl_audit:
14955
14956 cstar_tracesys:
14957 #ifdef CONFIG_AUDITSYSCALL
14958- testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
14959+ testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
14960 jz cstar_auditsys
14961 #endif
14962 xchgl %r9d,%ebp
14963@@ -382,11 +460,19 @@ cstar_tracesys:
14964 xchgl %ebp,%r9d
14965 cmpq $(IA32_NR_syscalls-1),%rax
14966 ja int_ret_from_sys_call /* cstar_tracesys has set RAX(%rsp) */
14967+
14968+ pax_erase_kstack
14969+
14970 jmp cstar_do_call
14971 END(ia32_cstar_target)
14972
14973 ia32_badarg:
14974 ASM_CLAC
14975+
14976+#ifdef CONFIG_PAX_MEMORY_UDEREF
14977+ ASM_PAX_CLOSE_USERLAND
14978+#endif
14979+
14980 movq $-EFAULT,%rax
14981 jmp ia32_sysret
14982 CFI_ENDPROC
14983@@ -423,19 +509,26 @@ ENTRY(ia32_syscall)
14984 CFI_REL_OFFSET rip,RIP-RIP
14985 PARAVIRT_ADJUST_EXCEPTION_FRAME
14986 SWAPGS
14987- /*
14988- * No need to follow this irqs on/off section: the syscall
14989- * disabled irqs and here we enable it straight after entry:
14990- */
14991- ENABLE_INTERRUPTS(CLBR_NONE)
14992 movl %eax,%eax
14993 pushq_cfi %rax
14994 cld
14995 /* note the registers are not zero extended to the sf.
14996 this could be a problem. */
14997 SAVE_ARGS 0,1,0
14998- orl $TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
14999- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
15000+ pax_enter_kernel_user
15001+
15002+#ifdef CONFIG_PAX_RANDKSTACK
15003+ pax_erase_kstack
15004+#endif
15005+
15006+ /*
15007+ * No need to follow this irqs on/off section: the syscall
15008+ * disabled irqs and here we enable it straight after entry:
15009+ */
15010+ ENABLE_INTERRUPTS(CLBR_NONE)
15011+ GET_THREAD_INFO(%r11)
15012+ orl $TS_COMPAT,TI_status(%r11)
15013+ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
15014 jnz ia32_tracesys
15015 cmpq $(IA32_NR_syscalls-1),%rax
15016 ja ia32_badsys
15017@@ -458,6 +551,9 @@ ia32_tracesys:
15018 RESTORE_REST
15019 cmpq $(IA32_NR_syscalls-1),%rax
15020 ja int_ret_from_sys_call /* ia32_tracesys has set RAX(%rsp) */
15021+
15022+ pax_erase_kstack
15023+
15024 jmp ia32_do_call
15025 END(ia32_syscall)
15026
15027diff --git a/arch/x86/ia32/sys_ia32.c b/arch/x86/ia32/sys_ia32.c
15028index 8e0ceec..af13504 100644
15029--- a/arch/x86/ia32/sys_ia32.c
15030+++ b/arch/x86/ia32/sys_ia32.c
15031@@ -69,8 +69,8 @@ asmlinkage long sys32_ftruncate64(unsigned int fd, unsigned long offset_low,
15032 */
15033 static int cp_stat64(struct stat64 __user *ubuf, struct kstat *stat)
15034 {
15035- typeof(ubuf->st_uid) uid = 0;
15036- typeof(ubuf->st_gid) gid = 0;
15037+ typeof(((struct stat64 *)0)->st_uid) uid = 0;
15038+ typeof(((struct stat64 *)0)->st_gid) gid = 0;
15039 SET_UID(uid, from_kuid_munged(current_user_ns(), stat->uid));
15040 SET_GID(gid, from_kgid_munged(current_user_ns(), stat->gid));
15041 if (!access_ok(VERIFY_WRITE, ubuf, sizeof(struct stat64)) ||
15042diff --git a/arch/x86/include/asm/alternative-asm.h b/arch/x86/include/asm/alternative-asm.h
15043index 372231c..51b537d 100644
15044--- a/arch/x86/include/asm/alternative-asm.h
15045+++ b/arch/x86/include/asm/alternative-asm.h
15046@@ -18,6 +18,45 @@
15047 .endm
15048 #endif
15049
15050+#ifdef KERNEXEC_PLUGIN
15051+ .macro pax_force_retaddr_bts rip=0
15052+ btsq $63,\rip(%rsp)
15053+ .endm
15054+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_BTS
15055+ .macro pax_force_retaddr rip=0, reload=0
15056+ btsq $63,\rip(%rsp)
15057+ .endm
15058+ .macro pax_force_fptr ptr
15059+ btsq $63,\ptr
15060+ .endm
15061+ .macro pax_set_fptr_mask
15062+ .endm
15063+#endif
15064+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
15065+ .macro pax_force_retaddr rip=0, reload=0
15066+ .if \reload
15067+ pax_set_fptr_mask
15068+ .endif
15069+ orq %r12,\rip(%rsp)
15070+ .endm
15071+ .macro pax_force_fptr ptr
15072+ orq %r12,\ptr
15073+ .endm
15074+ .macro pax_set_fptr_mask
15075+ movabs $0x8000000000000000,%r12
15076+ .endm
15077+#endif
15078+#else
15079+ .macro pax_force_retaddr rip=0, reload=0
15080+ .endm
15081+ .macro pax_force_fptr ptr
15082+ .endm
15083+ .macro pax_force_retaddr_bts rip=0
15084+ .endm
15085+ .macro pax_set_fptr_mask
15086+ .endm
15087+#endif
15088+
15089 .macro altinstruction_entry orig alt feature orig_len alt_len
15090 .long \orig - .
15091 .long \alt - .
15092diff --git a/arch/x86/include/asm/alternative.h b/arch/x86/include/asm/alternative.h
15093index 473bdbe..b1e3377 100644
15094--- a/arch/x86/include/asm/alternative.h
15095+++ b/arch/x86/include/asm/alternative.h
15096@@ -106,7 +106,7 @@ static inline int alternatives_text_reserved(void *start, void *end)
15097 ".pushsection .discard,\"aw\",@progbits\n" \
15098 DISCARD_ENTRY(1) \
15099 ".popsection\n" \
15100- ".pushsection .altinstr_replacement, \"ax\"\n" \
15101+ ".pushsection .altinstr_replacement, \"a\"\n" \
15102 ALTINSTR_REPLACEMENT(newinstr, feature, 1) \
15103 ".popsection"
15104
15105@@ -120,7 +120,7 @@ static inline int alternatives_text_reserved(void *start, void *end)
15106 DISCARD_ENTRY(1) \
15107 DISCARD_ENTRY(2) \
15108 ".popsection\n" \
15109- ".pushsection .altinstr_replacement, \"ax\"\n" \
15110+ ".pushsection .altinstr_replacement, \"a\"\n" \
15111 ALTINSTR_REPLACEMENT(newinstr1, feature1, 1) \
15112 ALTINSTR_REPLACEMENT(newinstr2, feature2, 2) \
15113 ".popsection"
15114diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h
15115index efc3b22..85c4f3a 100644
15116--- a/arch/x86/include/asm/apic.h
15117+++ b/arch/x86/include/asm/apic.h
15118@@ -45,7 +45,7 @@ static inline void generic_apic_probe(void)
15119
15120 #ifdef CONFIG_X86_LOCAL_APIC
15121
15122-extern unsigned int apic_verbosity;
15123+extern int apic_verbosity;
15124 extern int local_apic_timer_c2_ok;
15125
15126 extern int disable_apic;
15127diff --git a/arch/x86/include/asm/apm.h b/arch/x86/include/asm/apm.h
15128index 20370c6..a2eb9b0 100644
15129--- a/arch/x86/include/asm/apm.h
15130+++ b/arch/x86/include/asm/apm.h
15131@@ -34,7 +34,7 @@ static inline void apm_bios_call_asm(u32 func, u32 ebx_in, u32 ecx_in,
15132 __asm__ __volatile__(APM_DO_ZERO_SEGS
15133 "pushl %%edi\n\t"
15134 "pushl %%ebp\n\t"
15135- "lcall *%%cs:apm_bios_entry\n\t"
15136+ "lcall *%%ss:apm_bios_entry\n\t"
15137 "setc %%al\n\t"
15138 "popl %%ebp\n\t"
15139 "popl %%edi\n\t"
15140@@ -58,7 +58,7 @@ static inline u8 apm_bios_call_simple_asm(u32 func, u32 ebx_in,
15141 __asm__ __volatile__(APM_DO_ZERO_SEGS
15142 "pushl %%edi\n\t"
15143 "pushl %%ebp\n\t"
15144- "lcall *%%cs:apm_bios_entry\n\t"
15145+ "lcall *%%ss:apm_bios_entry\n\t"
15146 "setc %%bl\n\t"
15147 "popl %%ebp\n\t"
15148 "popl %%edi\n\t"
15149diff --git a/arch/x86/include/asm/atomic.h b/arch/x86/include/asm/atomic.h
15150index 5e5cd12..51cdc93 100644
15151--- a/arch/x86/include/asm/atomic.h
15152+++ b/arch/x86/include/asm/atomic.h
15153@@ -28,6 +28,17 @@ static inline int atomic_read(const atomic_t *v)
15154 }
15155
15156 /**
15157+ * atomic_read_unchecked - read atomic variable
15158+ * @v: pointer of type atomic_unchecked_t
15159+ *
15160+ * Atomically reads the value of @v.
15161+ */
15162+static inline int __intentional_overflow(-1) atomic_read_unchecked(const atomic_unchecked_t *v)
15163+{
15164+ return ACCESS_ONCE((v)->counter);
15165+}
15166+
15167+/**
15168 * atomic_set - set atomic variable
15169 * @v: pointer of type atomic_t
15170 * @i: required value
15171@@ -40,6 +51,18 @@ static inline void atomic_set(atomic_t *v, int i)
15172 }
15173
15174 /**
15175+ * atomic_set_unchecked - set atomic variable
15176+ * @v: pointer of type atomic_unchecked_t
15177+ * @i: required value
15178+ *
15179+ * Atomically sets the value of @v to @i.
15180+ */
15181+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
15182+{
15183+ v->counter = i;
15184+}
15185+
15186+/**
15187 * atomic_add - add integer to atomic variable
15188 * @i: integer value to add
15189 * @v: pointer of type atomic_t
15190@@ -48,7 +71,29 @@ static inline void atomic_set(atomic_t *v, int i)
15191 */
15192 static inline void atomic_add(int i, atomic_t *v)
15193 {
15194- asm volatile(LOCK_PREFIX "addl %1,%0"
15195+ asm volatile(LOCK_PREFIX "addl %1,%0\n"
15196+
15197+#ifdef CONFIG_PAX_REFCOUNT
15198+ "jno 0f\n"
15199+ LOCK_PREFIX "subl %1,%0\n"
15200+ "int $4\n0:\n"
15201+ _ASM_EXTABLE(0b, 0b)
15202+#endif
15203+
15204+ : "+m" (v->counter)
15205+ : "ir" (i));
15206+}
15207+
15208+/**
15209+ * atomic_add_unchecked - add integer to atomic variable
15210+ * @i: integer value to add
15211+ * @v: pointer of type atomic_unchecked_t
15212+ *
15213+ * Atomically adds @i to @v.
15214+ */
15215+static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
15216+{
15217+ asm volatile(LOCK_PREFIX "addl %1,%0\n"
15218 : "+m" (v->counter)
15219 : "ir" (i));
15220 }
15221@@ -62,7 +107,29 @@ static inline void atomic_add(int i, atomic_t *v)
15222 */
15223 static inline void atomic_sub(int i, atomic_t *v)
15224 {
15225- asm volatile(LOCK_PREFIX "subl %1,%0"
15226+ asm volatile(LOCK_PREFIX "subl %1,%0\n"
15227+
15228+#ifdef CONFIG_PAX_REFCOUNT
15229+ "jno 0f\n"
15230+ LOCK_PREFIX "addl %1,%0\n"
15231+ "int $4\n0:\n"
15232+ _ASM_EXTABLE(0b, 0b)
15233+#endif
15234+
15235+ : "+m" (v->counter)
15236+ : "ir" (i));
15237+}
15238+
15239+/**
15240+ * atomic_sub_unchecked - subtract integer from atomic variable
15241+ * @i: integer value to subtract
15242+ * @v: pointer of type atomic_unchecked_t
15243+ *
15244+ * Atomically subtracts @i from @v.
15245+ */
15246+static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
15247+{
15248+ asm volatile(LOCK_PREFIX "subl %1,%0\n"
15249 : "+m" (v->counter)
15250 : "ir" (i));
15251 }
15252@@ -78,7 +145,7 @@ static inline void atomic_sub(int i, atomic_t *v)
15253 */
15254 static inline int atomic_sub_and_test(int i, atomic_t *v)
15255 {
15256- GEN_BINARY_RMWcc(LOCK_PREFIX "subl", v->counter, "er", i, "%0", "e");
15257+ GEN_BINARY_RMWcc(LOCK_PREFIX "subl", LOCK_PREFIX "addl", v->counter, "er", i, "%0", "e");
15258 }
15259
15260 /**
15261@@ -89,7 +156,27 @@ static inline int atomic_sub_and_test(int i, atomic_t *v)
15262 */
15263 static inline void atomic_inc(atomic_t *v)
15264 {
15265- asm volatile(LOCK_PREFIX "incl %0"
15266+ asm volatile(LOCK_PREFIX "incl %0\n"
15267+
15268+#ifdef CONFIG_PAX_REFCOUNT
15269+ "jno 0f\n"
15270+ LOCK_PREFIX "decl %0\n"
15271+ "int $4\n0:\n"
15272+ _ASM_EXTABLE(0b, 0b)
15273+#endif
15274+
15275+ : "+m" (v->counter));
15276+}
15277+
15278+/**
15279+ * atomic_inc_unchecked - increment atomic variable
15280+ * @v: pointer of type atomic_unchecked_t
15281+ *
15282+ * Atomically increments @v by 1.
15283+ */
15284+static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
15285+{
15286+ asm volatile(LOCK_PREFIX "incl %0\n"
15287 : "+m" (v->counter));
15288 }
15289
15290@@ -101,7 +188,27 @@ static inline void atomic_inc(atomic_t *v)
15291 */
15292 static inline void atomic_dec(atomic_t *v)
15293 {
15294- asm volatile(LOCK_PREFIX "decl %0"
15295+ asm volatile(LOCK_PREFIX "decl %0\n"
15296+
15297+#ifdef CONFIG_PAX_REFCOUNT
15298+ "jno 0f\n"
15299+ LOCK_PREFIX "incl %0\n"
15300+ "int $4\n0:\n"
15301+ _ASM_EXTABLE(0b, 0b)
15302+#endif
15303+
15304+ : "+m" (v->counter));
15305+}
15306+
15307+/**
15308+ * atomic_dec_unchecked - decrement atomic variable
15309+ * @v: pointer of type atomic_unchecked_t
15310+ *
15311+ * Atomically decrements @v by 1.
15312+ */
15313+static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
15314+{
15315+ asm volatile(LOCK_PREFIX "decl %0\n"
15316 : "+m" (v->counter));
15317 }
15318
15319@@ -115,7 +222,7 @@ static inline void atomic_dec(atomic_t *v)
15320 */
15321 static inline int atomic_dec_and_test(atomic_t *v)
15322 {
15323- GEN_UNARY_RMWcc(LOCK_PREFIX "decl", v->counter, "%0", "e");
15324+ GEN_UNARY_RMWcc(LOCK_PREFIX "decl", LOCK_PREFIX "incl", v->counter, "%0", "e");
15325 }
15326
15327 /**
15328@@ -128,7 +235,20 @@ static inline int atomic_dec_and_test(atomic_t *v)
15329 */
15330 static inline int atomic_inc_and_test(atomic_t *v)
15331 {
15332- GEN_UNARY_RMWcc(LOCK_PREFIX "incl", v->counter, "%0", "e");
15333+ GEN_UNARY_RMWcc(LOCK_PREFIX "incl", LOCK_PREFIX "decl", v->counter, "%0", "e");
15334+}
15335+
15336+/**
15337+ * atomic_inc_and_test_unchecked - increment and test
15338+ * @v: pointer of type atomic_unchecked_t
15339+ *
15340+ * Atomically increments @v by 1
15341+ * and returns true if the result is zero, or false for all
15342+ * other cases.
15343+ */
15344+static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
15345+{
15346+ GEN_UNARY_RMWcc_unchecked(LOCK_PREFIX "incl", v->counter, "%0", "e");
15347 }
15348
15349 /**
15350@@ -142,7 +262,7 @@ static inline int atomic_inc_and_test(atomic_t *v)
15351 */
15352 static inline int atomic_add_negative(int i, atomic_t *v)
15353 {
15354- GEN_BINARY_RMWcc(LOCK_PREFIX "addl", v->counter, "er", i, "%0", "s");
15355+ GEN_BINARY_RMWcc(LOCK_PREFIX "addl", LOCK_PREFIX "subl", v->counter, "er", i, "%0", "s");
15356 }
15357
15358 /**
15359@@ -152,7 +272,19 @@ static inline int atomic_add_negative(int i, atomic_t *v)
15360 *
15361 * Atomically adds @i to @v and returns @i + @v
15362 */
15363-static inline int atomic_add_return(int i, atomic_t *v)
15364+static inline int __intentional_overflow(-1) atomic_add_return(int i, atomic_t *v)
15365+{
15366+ return i + xadd_check_overflow(&v->counter, i);
15367+}
15368+
15369+/**
15370+ * atomic_add_return_unchecked - add integer and return
15371+ * @i: integer value to add
15372+ * @v: pointer of type atomic_unchecked_t
15373+ *
15374+ * Atomically adds @i to @v and returns @i + @v
15375+ */
15376+static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
15377 {
15378 return i + xadd(&v->counter, i);
15379 }
15380@@ -164,15 +296,24 @@ static inline int atomic_add_return(int i, atomic_t *v)
15381 *
15382 * Atomically subtracts @i from @v and returns @v - @i
15383 */
15384-static inline int atomic_sub_return(int i, atomic_t *v)
15385+static inline int __intentional_overflow(-1) atomic_sub_return(int i, atomic_t *v)
15386 {
15387 return atomic_add_return(-i, v);
15388 }
15389
15390 #define atomic_inc_return(v) (atomic_add_return(1, v))
15391+static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
15392+{
15393+ return atomic_add_return_unchecked(1, v);
15394+}
15395 #define atomic_dec_return(v) (atomic_sub_return(1, v))
15396
15397-static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
15398+static inline int __intentional_overflow(-1) atomic_cmpxchg(atomic_t *v, int old, int new)
15399+{
15400+ return cmpxchg(&v->counter, old, new);
15401+}
15402+
15403+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
15404 {
15405 return cmpxchg(&v->counter, old, new);
15406 }
15407@@ -182,6 +323,11 @@ static inline int atomic_xchg(atomic_t *v, int new)
15408 return xchg(&v->counter, new);
15409 }
15410
15411+static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
15412+{
15413+ return xchg(&v->counter, new);
15414+}
15415+
15416 /**
15417 * __atomic_add_unless - add unless the number is already a given value
15418 * @v: pointer of type atomic_t
15419@@ -193,12 +339,25 @@ static inline int atomic_xchg(atomic_t *v, int new)
15420 */
15421 static inline int __atomic_add_unless(atomic_t *v, int a, int u)
15422 {
15423- int c, old;
15424+ int c, old, new;
15425 c = atomic_read(v);
15426 for (;;) {
15427- if (unlikely(c == (u)))
15428+ if (unlikely(c == u))
15429 break;
15430- old = atomic_cmpxchg((v), c, c + (a));
15431+
15432+ asm volatile("addl %2,%0\n"
15433+
15434+#ifdef CONFIG_PAX_REFCOUNT
15435+ "jno 0f\n"
15436+ "subl %2,%0\n"
15437+ "int $4\n0:\n"
15438+ _ASM_EXTABLE(0b, 0b)
15439+#endif
15440+
15441+ : "=r" (new)
15442+ : "0" (c), "ir" (a));
15443+
15444+ old = atomic_cmpxchg(v, c, new);
15445 if (likely(old == c))
15446 break;
15447 c = old;
15448@@ -207,6 +366,49 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
15449 }
15450
15451 /**
15452+ * atomic_inc_not_zero_hint - increment if not null
15453+ * @v: pointer of type atomic_t
15454+ * @hint: probable value of the atomic before the increment
15455+ *
15456+ * This version of atomic_inc_not_zero() gives a hint of probable
15457+ * value of the atomic. This helps processor to not read the memory
15458+ * before doing the atomic read/modify/write cycle, lowering
15459+ * number of bus transactions on some arches.
15460+ *
15461+ * Returns: 0 if increment was not done, 1 otherwise.
15462+ */
15463+#define atomic_inc_not_zero_hint atomic_inc_not_zero_hint
15464+static inline int atomic_inc_not_zero_hint(atomic_t *v, int hint)
15465+{
15466+ int val, c = hint, new;
15467+
15468+ /* sanity test, should be removed by compiler if hint is a constant */
15469+ if (!hint)
15470+ return __atomic_add_unless(v, 1, 0);
15471+
15472+ do {
15473+ asm volatile("incl %0\n"
15474+
15475+#ifdef CONFIG_PAX_REFCOUNT
15476+ "jno 0f\n"
15477+ "decl %0\n"
15478+ "int $4\n0:\n"
15479+ _ASM_EXTABLE(0b, 0b)
15480+#endif
15481+
15482+ : "=r" (new)
15483+ : "0" (c));
15484+
15485+ val = atomic_cmpxchg(v, c, new);
15486+ if (val == c)
15487+ return 1;
15488+ c = val;
15489+ } while (c);
15490+
15491+ return 0;
15492+}
15493+
15494+/**
15495 * atomic_inc_short - increment of a short integer
15496 * @v: pointer to type int
15497 *
15498@@ -220,14 +422,37 @@ static inline short int atomic_inc_short(short int *v)
15499 }
15500
15501 /* These are x86-specific, used by some header files */
15502-#define atomic_clear_mask(mask, addr) \
15503- asm volatile(LOCK_PREFIX "andl %0,%1" \
15504- : : "r" (~(mask)), "m" (*(addr)) : "memory")
15505+static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
15506+{
15507+ asm volatile(LOCK_PREFIX "andl %1,%0"
15508+ : "+m" (v->counter)
15509+ : "r" (~(mask))
15510+ : "memory");
15511+}
15512
15513-#define atomic_set_mask(mask, addr) \
15514- asm volatile(LOCK_PREFIX "orl %0,%1" \
15515- : : "r" ((unsigned)(mask)), "m" (*(addr)) \
15516- : "memory")
15517+static inline void atomic_clear_mask_unchecked(unsigned int mask, atomic_unchecked_t *v)
15518+{
15519+ asm volatile(LOCK_PREFIX "andl %1,%0"
15520+ : "+m" (v->counter)
15521+ : "r" (~(mask))
15522+ : "memory");
15523+}
15524+
15525+static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
15526+{
15527+ asm volatile(LOCK_PREFIX "orl %1,%0"
15528+ : "+m" (v->counter)
15529+ : "r" (mask)
15530+ : "memory");
15531+}
15532+
15533+static inline void atomic_set_mask_unchecked(unsigned int mask, atomic_unchecked_t *v)
15534+{
15535+ asm volatile(LOCK_PREFIX "orl %1,%0"
15536+ : "+m" (v->counter)
15537+ : "r" (mask)
15538+ : "memory");
15539+}
15540
15541 #ifdef CONFIG_X86_32
15542 # include <asm/atomic64_32.h>
15543diff --git a/arch/x86/include/asm/atomic64_32.h b/arch/x86/include/asm/atomic64_32.h
15544index b154de7..bf18a5a 100644
15545--- a/arch/x86/include/asm/atomic64_32.h
15546+++ b/arch/x86/include/asm/atomic64_32.h
15547@@ -12,6 +12,14 @@ typedef struct {
15548 u64 __aligned(8) counter;
15549 } atomic64_t;
15550
15551+#ifdef CONFIG_PAX_REFCOUNT
15552+typedef struct {
15553+ u64 __aligned(8) counter;
15554+} atomic64_unchecked_t;
15555+#else
15556+typedef atomic64_t atomic64_unchecked_t;
15557+#endif
15558+
15559 #define ATOMIC64_INIT(val) { (val) }
15560
15561 #define __ATOMIC64_DECL(sym) void atomic64_##sym(atomic64_t *, ...)
15562@@ -37,21 +45,31 @@ typedef struct {
15563 ATOMIC64_DECL_ONE(sym##_386)
15564
15565 ATOMIC64_DECL_ONE(add_386);
15566+ATOMIC64_DECL_ONE(add_unchecked_386);
15567 ATOMIC64_DECL_ONE(sub_386);
15568+ATOMIC64_DECL_ONE(sub_unchecked_386);
15569 ATOMIC64_DECL_ONE(inc_386);
15570+ATOMIC64_DECL_ONE(inc_unchecked_386);
15571 ATOMIC64_DECL_ONE(dec_386);
15572+ATOMIC64_DECL_ONE(dec_unchecked_386);
15573 #endif
15574
15575 #define alternative_atomic64(f, out, in...) \
15576 __alternative_atomic64(f, f, ASM_OUTPUT2(out), ## in)
15577
15578 ATOMIC64_DECL(read);
15579+ATOMIC64_DECL(read_unchecked);
15580 ATOMIC64_DECL(set);
15581+ATOMIC64_DECL(set_unchecked);
15582 ATOMIC64_DECL(xchg);
15583 ATOMIC64_DECL(add_return);
15584+ATOMIC64_DECL(add_return_unchecked);
15585 ATOMIC64_DECL(sub_return);
15586+ATOMIC64_DECL(sub_return_unchecked);
15587 ATOMIC64_DECL(inc_return);
15588+ATOMIC64_DECL(inc_return_unchecked);
15589 ATOMIC64_DECL(dec_return);
15590+ATOMIC64_DECL(dec_return_unchecked);
15591 ATOMIC64_DECL(dec_if_positive);
15592 ATOMIC64_DECL(inc_not_zero);
15593 ATOMIC64_DECL(add_unless);
15594@@ -77,6 +95,21 @@ static inline long long atomic64_cmpxchg(atomic64_t *v, long long o, long long n
15595 }
15596
15597 /**
15598+ * atomic64_cmpxchg_unchecked - cmpxchg atomic64 variable
15599+ * @p: pointer to type atomic64_unchecked_t
15600+ * @o: expected value
15601+ * @n: new value
15602+ *
15603+ * Atomically sets @v to @n if it was equal to @o and returns
15604+ * the old value.
15605+ */
15606+
15607+static inline long long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long long o, long long n)
15608+{
15609+ return cmpxchg64(&v->counter, o, n);
15610+}
15611+
15612+/**
15613 * atomic64_xchg - xchg atomic64 variable
15614 * @v: pointer to type atomic64_t
15615 * @n: value to assign
15616@@ -112,6 +145,22 @@ static inline void atomic64_set(atomic64_t *v, long long i)
15617 }
15618
15619 /**
15620+ * atomic64_set_unchecked - set atomic64 variable
15621+ * @v: pointer to type atomic64_unchecked_t
15622+ * @n: value to assign
15623+ *
15624+ * Atomically sets the value of @v to @n.
15625+ */
15626+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long long i)
15627+{
15628+ unsigned high = (unsigned)(i >> 32);
15629+ unsigned low = (unsigned)i;
15630+ alternative_atomic64(set, /* no output */,
15631+ "S" (v), "b" (low), "c" (high)
15632+ : "eax", "edx", "memory");
15633+}
15634+
15635+/**
15636 * atomic64_read - read atomic64 variable
15637 * @v: pointer to type atomic64_t
15638 *
15639@@ -125,6 +174,19 @@ static inline long long atomic64_read(const atomic64_t *v)
15640 }
15641
15642 /**
15643+ * atomic64_read_unchecked - read atomic64 variable
15644+ * @v: pointer to type atomic64_unchecked_t
15645+ *
15646+ * Atomically reads the value of @v and returns it.
15647+ */
15648+static inline long long __intentional_overflow(-1) atomic64_read_unchecked(atomic64_unchecked_t *v)
15649+{
15650+ long long r;
15651+ alternative_atomic64(read, "=&A" (r), "c" (v) : "memory");
15652+ return r;
15653+ }
15654+
15655+/**
15656 * atomic64_add_return - add and return
15657 * @i: integer value to add
15658 * @v: pointer to type atomic64_t
15659@@ -139,6 +201,21 @@ static inline long long atomic64_add_return(long long i, atomic64_t *v)
15660 return i;
15661 }
15662
15663+/**
15664+ * atomic64_add_return_unchecked - add and return
15665+ * @i: integer value to add
15666+ * @v: pointer to type atomic64_unchecked_t
15667+ *
15668+ * Atomically adds @i to @v and returns @i + *@v
15669+ */
15670+static inline long long atomic64_add_return_unchecked(long long i, atomic64_unchecked_t *v)
15671+{
15672+ alternative_atomic64(add_return_unchecked,
15673+ ASM_OUTPUT2("+A" (i), "+c" (v)),
15674+ ASM_NO_INPUT_CLOBBER("memory"));
15675+ return i;
15676+}
15677+
15678 /*
15679 * Other variants with different arithmetic operators:
15680 */
15681@@ -158,6 +235,14 @@ static inline long long atomic64_inc_return(atomic64_t *v)
15682 return a;
15683 }
15684
15685+static inline long long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
15686+{
15687+ long long a;
15688+ alternative_atomic64(inc_return_unchecked, "=&A" (a),
15689+ "S" (v) : "memory", "ecx");
15690+ return a;
15691+}
15692+
15693 static inline long long atomic64_dec_return(atomic64_t *v)
15694 {
15695 long long a;
15696@@ -182,6 +267,21 @@ static inline long long atomic64_add(long long i, atomic64_t *v)
15697 }
15698
15699 /**
15700+ * atomic64_add_unchecked - add integer to atomic64 variable
15701+ * @i: integer value to add
15702+ * @v: pointer to type atomic64_unchecked_t
15703+ *
15704+ * Atomically adds @i to @v.
15705+ */
15706+static inline long long atomic64_add_unchecked(long long i, atomic64_unchecked_t *v)
15707+{
15708+ __alternative_atomic64(add_unchecked, add_return_unchecked,
15709+ ASM_OUTPUT2("+A" (i), "+c" (v)),
15710+ ASM_NO_INPUT_CLOBBER("memory"));
15711+ return i;
15712+}
15713+
15714+/**
15715 * atomic64_sub - subtract the atomic64 variable
15716 * @i: integer value to subtract
15717 * @v: pointer to type atomic64_t
15718diff --git a/arch/x86/include/asm/atomic64_64.h b/arch/x86/include/asm/atomic64_64.h
15719index f8d273e..02f39f3 100644
15720--- a/arch/x86/include/asm/atomic64_64.h
15721+++ b/arch/x86/include/asm/atomic64_64.h
15722@@ -22,6 +22,18 @@ static inline long atomic64_read(const atomic64_t *v)
15723 }
15724
15725 /**
15726+ * atomic64_read_unchecked - read atomic64 variable
15727+ * @v: pointer of type atomic64_unchecked_t
15728+ *
15729+ * Atomically reads the value of @v.
15730+ * Doesn't imply a read memory barrier.
15731+ */
15732+static inline long __intentional_overflow(-1) atomic64_read_unchecked(const atomic64_unchecked_t *v)
15733+{
15734+ return ACCESS_ONCE((v)->counter);
15735+}
15736+
15737+/**
15738 * atomic64_set - set atomic64 variable
15739 * @v: pointer to type atomic64_t
15740 * @i: required value
15741@@ -34,6 +46,18 @@ static inline void atomic64_set(atomic64_t *v, long i)
15742 }
15743
15744 /**
15745+ * atomic64_set_unchecked - set atomic64 variable
15746+ * @v: pointer to type atomic64_unchecked_t
15747+ * @i: required value
15748+ *
15749+ * Atomically sets the value of @v to @i.
15750+ */
15751+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
15752+{
15753+ v->counter = i;
15754+}
15755+
15756+/**
15757 * atomic64_add - add integer to atomic64 variable
15758 * @i: integer value to add
15759 * @v: pointer to type atomic64_t
15760@@ -42,6 +66,28 @@ static inline void atomic64_set(atomic64_t *v, long i)
15761 */
15762 static inline void atomic64_add(long i, atomic64_t *v)
15763 {
15764+ asm volatile(LOCK_PREFIX "addq %1,%0\n"
15765+
15766+#ifdef CONFIG_PAX_REFCOUNT
15767+ "jno 0f\n"
15768+ LOCK_PREFIX "subq %1,%0\n"
15769+ "int $4\n0:\n"
15770+ _ASM_EXTABLE(0b, 0b)
15771+#endif
15772+
15773+ : "=m" (v->counter)
15774+ : "er" (i), "m" (v->counter));
15775+}
15776+
15777+/**
15778+ * atomic64_add_unchecked - add integer to atomic64 variable
15779+ * @i: integer value to add
15780+ * @v: pointer to type atomic64_unchecked_t
15781+ *
15782+ * Atomically adds @i to @v.
15783+ */
15784+static inline void atomic64_add_unchecked(long i, atomic64_unchecked_t *v)
15785+{
15786 asm volatile(LOCK_PREFIX "addq %1,%0"
15787 : "=m" (v->counter)
15788 : "er" (i), "m" (v->counter));
15789@@ -56,7 +102,29 @@ static inline void atomic64_add(long i, atomic64_t *v)
15790 */
15791 static inline void atomic64_sub(long i, atomic64_t *v)
15792 {
15793- asm volatile(LOCK_PREFIX "subq %1,%0"
15794+ asm volatile(LOCK_PREFIX "subq %1,%0\n"
15795+
15796+#ifdef CONFIG_PAX_REFCOUNT
15797+ "jno 0f\n"
15798+ LOCK_PREFIX "addq %1,%0\n"
15799+ "int $4\n0:\n"
15800+ _ASM_EXTABLE(0b, 0b)
15801+#endif
15802+
15803+ : "=m" (v->counter)
15804+ : "er" (i), "m" (v->counter));
15805+}
15806+
15807+/**
15808+ * atomic64_sub_unchecked - subtract the atomic64 variable
15809+ * @i: integer value to subtract
15810+ * @v: pointer to type atomic64_unchecked_t
15811+ *
15812+ * Atomically subtracts @i from @v.
15813+ */
15814+static inline void atomic64_sub_unchecked(long i, atomic64_unchecked_t *v)
15815+{
15816+ asm volatile(LOCK_PREFIX "subq %1,%0\n"
15817 : "=m" (v->counter)
15818 : "er" (i), "m" (v->counter));
15819 }
15820@@ -72,7 +140,7 @@ static inline void atomic64_sub(long i, atomic64_t *v)
15821 */
15822 static inline int atomic64_sub_and_test(long i, atomic64_t *v)
15823 {
15824- GEN_BINARY_RMWcc(LOCK_PREFIX "subq", v->counter, "er", i, "%0", "e");
15825+ GEN_BINARY_RMWcc(LOCK_PREFIX "subq", LOCK_PREFIX "addq", v->counter, "er", i, "%0", "e");
15826 }
15827
15828 /**
15829@@ -83,6 +151,27 @@ static inline int atomic64_sub_and_test(long i, atomic64_t *v)
15830 */
15831 static inline void atomic64_inc(atomic64_t *v)
15832 {
15833+ asm volatile(LOCK_PREFIX "incq %0\n"
15834+
15835+#ifdef CONFIG_PAX_REFCOUNT
15836+ "jno 0f\n"
15837+ LOCK_PREFIX "decq %0\n"
15838+ "int $4\n0:\n"
15839+ _ASM_EXTABLE(0b, 0b)
15840+#endif
15841+
15842+ : "=m" (v->counter)
15843+ : "m" (v->counter));
15844+}
15845+
15846+/**
15847+ * atomic64_inc_unchecked - increment atomic64 variable
15848+ * @v: pointer to type atomic64_unchecked_t
15849+ *
15850+ * Atomically increments @v by 1.
15851+ */
15852+static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
15853+{
15854 asm volatile(LOCK_PREFIX "incq %0"
15855 : "=m" (v->counter)
15856 : "m" (v->counter));
15857@@ -96,7 +185,28 @@ static inline void atomic64_inc(atomic64_t *v)
15858 */
15859 static inline void atomic64_dec(atomic64_t *v)
15860 {
15861- asm volatile(LOCK_PREFIX "decq %0"
15862+ asm volatile(LOCK_PREFIX "decq %0\n"
15863+
15864+#ifdef CONFIG_PAX_REFCOUNT
15865+ "jno 0f\n"
15866+ LOCK_PREFIX "incq %0\n"
15867+ "int $4\n0:\n"
15868+ _ASM_EXTABLE(0b, 0b)
15869+#endif
15870+
15871+ : "=m" (v->counter)
15872+ : "m" (v->counter));
15873+}
15874+
15875+/**
15876+ * atomic64_dec_unchecked - decrement atomic64 variable
15877+ * @v: pointer to type atomic64_t
15878+ *
15879+ * Atomically decrements @v by 1.
15880+ */
15881+static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
15882+{
15883+ asm volatile(LOCK_PREFIX "decq %0\n"
15884 : "=m" (v->counter)
15885 : "m" (v->counter));
15886 }
15887@@ -111,7 +221,7 @@ static inline void atomic64_dec(atomic64_t *v)
15888 */
15889 static inline int atomic64_dec_and_test(atomic64_t *v)
15890 {
15891- GEN_UNARY_RMWcc(LOCK_PREFIX "decq", v->counter, "%0", "e");
15892+ GEN_UNARY_RMWcc(LOCK_PREFIX "decq", LOCK_PREFIX "incq", v->counter, "%0", "e");
15893 }
15894
15895 /**
15896@@ -124,7 +234,7 @@ static inline int atomic64_dec_and_test(atomic64_t *v)
15897 */
15898 static inline int atomic64_inc_and_test(atomic64_t *v)
15899 {
15900- GEN_UNARY_RMWcc(LOCK_PREFIX "incq", v->counter, "%0", "e");
15901+ GEN_UNARY_RMWcc(LOCK_PREFIX "incq", LOCK_PREFIX "decq", v->counter, "%0", "e");
15902 }
15903
15904 /**
15905@@ -138,7 +248,7 @@ static inline int atomic64_inc_and_test(atomic64_t *v)
15906 */
15907 static inline int atomic64_add_negative(long i, atomic64_t *v)
15908 {
15909- GEN_BINARY_RMWcc(LOCK_PREFIX "addq", v->counter, "er", i, "%0", "s");
15910+ GEN_BINARY_RMWcc(LOCK_PREFIX "addq", LOCK_PREFIX "subq", v->counter, "er", i, "%0", "s");
15911 }
15912
15913 /**
15914@@ -150,6 +260,18 @@ static inline int atomic64_add_negative(long i, atomic64_t *v)
15915 */
15916 static inline long atomic64_add_return(long i, atomic64_t *v)
15917 {
15918+ return i + xadd_check_overflow(&v->counter, i);
15919+}
15920+
15921+/**
15922+ * atomic64_add_return_unchecked - add and return
15923+ * @i: integer value to add
15924+ * @v: pointer to type atomic64_unchecked_t
15925+ *
15926+ * Atomically adds @i to @v and returns @i + @v
15927+ */
15928+static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
15929+{
15930 return i + xadd(&v->counter, i);
15931 }
15932
15933@@ -159,6 +281,10 @@ static inline long atomic64_sub_return(long i, atomic64_t *v)
15934 }
15935
15936 #define atomic64_inc_return(v) (atomic64_add_return(1, (v)))
15937+static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
15938+{
15939+ return atomic64_add_return_unchecked(1, v);
15940+}
15941 #define atomic64_dec_return(v) (atomic64_sub_return(1, (v)))
15942
15943 static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
15944@@ -166,6 +292,11 @@ static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
15945 return cmpxchg(&v->counter, old, new);
15946 }
15947
15948+static inline long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long old, long new)
15949+{
15950+ return cmpxchg(&v->counter, old, new);
15951+}
15952+
15953 static inline long atomic64_xchg(atomic64_t *v, long new)
15954 {
15955 return xchg(&v->counter, new);
15956@@ -182,17 +313,30 @@ static inline long atomic64_xchg(atomic64_t *v, long new)
15957 */
15958 static inline int atomic64_add_unless(atomic64_t *v, long a, long u)
15959 {
15960- long c, old;
15961+ long c, old, new;
15962 c = atomic64_read(v);
15963 for (;;) {
15964- if (unlikely(c == (u)))
15965+ if (unlikely(c == u))
15966 break;
15967- old = atomic64_cmpxchg((v), c, c + (a));
15968+
15969+ asm volatile("add %2,%0\n"
15970+
15971+#ifdef CONFIG_PAX_REFCOUNT
15972+ "jno 0f\n"
15973+ "sub %2,%0\n"
15974+ "int $4\n0:\n"
15975+ _ASM_EXTABLE(0b, 0b)
15976+#endif
15977+
15978+ : "=r" (new)
15979+ : "0" (c), "ir" (a));
15980+
15981+ old = atomic64_cmpxchg(v, c, new);
15982 if (likely(old == c))
15983 break;
15984 c = old;
15985 }
15986- return c != (u);
15987+ return c != u;
15988 }
15989
15990 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
15991diff --git a/arch/x86/include/asm/barrier.h b/arch/x86/include/asm/barrier.h
15992index 2ab1eb3..1e8cc5d 100644
15993--- a/arch/x86/include/asm/barrier.h
15994+++ b/arch/x86/include/asm/barrier.h
15995@@ -57,7 +57,7 @@
15996 do { \
15997 compiletime_assert_atomic_type(*p); \
15998 smp_mb(); \
15999- ACCESS_ONCE(*p) = (v); \
16000+ ACCESS_ONCE_RW(*p) = (v); \
16001 } while (0)
16002
16003 #define smp_load_acquire(p) \
16004@@ -74,7 +74,7 @@ do { \
16005 do { \
16006 compiletime_assert_atomic_type(*p); \
16007 barrier(); \
16008- ACCESS_ONCE(*p) = (v); \
16009+ ACCESS_ONCE_RW(*p) = (v); \
16010 } while (0)
16011
16012 #define smp_load_acquire(p) \
16013diff --git a/arch/x86/include/asm/bitops.h b/arch/x86/include/asm/bitops.h
16014index cfe3b95..d01b118 100644
16015--- a/arch/x86/include/asm/bitops.h
16016+++ b/arch/x86/include/asm/bitops.h
16017@@ -50,7 +50,7 @@
16018 * a mask operation on a byte.
16019 */
16020 #define IS_IMMEDIATE(nr) (__builtin_constant_p(nr))
16021-#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((void *)(addr) + ((nr)>>3))
16022+#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((volatile void *)(addr) + ((nr)>>3))
16023 #define CONST_MASK(nr) (1 << ((nr) & 7))
16024
16025 /**
16026@@ -203,7 +203,7 @@ static inline void change_bit(long nr, volatile unsigned long *addr)
16027 */
16028 static inline int test_and_set_bit(long nr, volatile unsigned long *addr)
16029 {
16030- GEN_BINARY_RMWcc(LOCK_PREFIX "bts", *addr, "Ir", nr, "%0", "c");
16031+ GEN_BINARY_RMWcc_unchecked(LOCK_PREFIX "bts", *addr, "Ir", nr, "%0", "c");
16032 }
16033
16034 /**
16035@@ -249,7 +249,7 @@ static inline int __test_and_set_bit(long nr, volatile unsigned long *addr)
16036 */
16037 static inline int test_and_clear_bit(long nr, volatile unsigned long *addr)
16038 {
16039- GEN_BINARY_RMWcc(LOCK_PREFIX "btr", *addr, "Ir", nr, "%0", "c");
16040+ GEN_BINARY_RMWcc_unchecked(LOCK_PREFIX "btr", *addr, "Ir", nr, "%0", "c");
16041 }
16042
16043 /**
16044@@ -302,7 +302,7 @@ static inline int __test_and_change_bit(long nr, volatile unsigned long *addr)
16045 */
16046 static inline int test_and_change_bit(long nr, volatile unsigned long *addr)
16047 {
16048- GEN_BINARY_RMWcc(LOCK_PREFIX "btc", *addr, "Ir", nr, "%0", "c");
16049+ GEN_BINARY_RMWcc_unchecked(LOCK_PREFIX "btc", *addr, "Ir", nr, "%0", "c");
16050 }
16051
16052 static __always_inline int constant_test_bit(long nr, const volatile unsigned long *addr)
16053@@ -343,7 +343,7 @@ static int test_bit(int nr, const volatile unsigned long *addr);
16054 *
16055 * Undefined if no bit exists, so code should check against 0 first.
16056 */
16057-static inline unsigned long __ffs(unsigned long word)
16058+static inline unsigned long __intentional_overflow(-1) __ffs(unsigned long word)
16059 {
16060 asm("rep; bsf %1,%0"
16061 : "=r" (word)
16062@@ -357,7 +357,7 @@ static inline unsigned long __ffs(unsigned long word)
16063 *
16064 * Undefined if no zero exists, so code should check against ~0UL first.
16065 */
16066-static inline unsigned long ffz(unsigned long word)
16067+static inline unsigned long __intentional_overflow(-1) ffz(unsigned long word)
16068 {
16069 asm("rep; bsf %1,%0"
16070 : "=r" (word)
16071@@ -371,7 +371,7 @@ static inline unsigned long ffz(unsigned long word)
16072 *
16073 * Undefined if no set bit exists, so code should check against 0 first.
16074 */
16075-static inline unsigned long __fls(unsigned long word)
16076+static inline unsigned long __intentional_overflow(-1) __fls(unsigned long word)
16077 {
16078 asm("bsr %1,%0"
16079 : "=r" (word)
16080@@ -434,7 +434,7 @@ static inline int ffs(int x)
16081 * set bit if value is nonzero. The last (most significant) bit is
16082 * at position 32.
16083 */
16084-static inline int fls(int x)
16085+static inline int __intentional_overflow(-1) fls(int x)
16086 {
16087 int r;
16088
16089@@ -476,7 +476,7 @@ static inline int fls(int x)
16090 * at position 64.
16091 */
16092 #ifdef CONFIG_X86_64
16093-static __always_inline int fls64(__u64 x)
16094+static __always_inline __intentional_overflow(-1) int fls64(__u64 x)
16095 {
16096 int bitpos = -1;
16097 /*
16098diff --git a/arch/x86/include/asm/boot.h b/arch/x86/include/asm/boot.h
16099index 4fa687a..60f2d39 100644
16100--- a/arch/x86/include/asm/boot.h
16101+++ b/arch/x86/include/asm/boot.h
16102@@ -6,10 +6,15 @@
16103 #include <uapi/asm/boot.h>
16104
16105 /* Physical address where kernel should be loaded. */
16106-#define LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
16107+#define ____LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
16108 + (CONFIG_PHYSICAL_ALIGN - 1)) \
16109 & ~(CONFIG_PHYSICAL_ALIGN - 1))
16110
16111+#ifndef __ASSEMBLY__
16112+extern unsigned char __LOAD_PHYSICAL_ADDR[];
16113+#define LOAD_PHYSICAL_ADDR ((unsigned long)__LOAD_PHYSICAL_ADDR)
16114+#endif
16115+
16116 /* Minimum kernel alignment, as a power of two */
16117 #ifdef CONFIG_X86_64
16118 #define MIN_KERNEL_ALIGN_LG2 PMD_SHIFT
16119diff --git a/arch/x86/include/asm/cache.h b/arch/x86/include/asm/cache.h
16120index 48f99f1..d78ebf9 100644
16121--- a/arch/x86/include/asm/cache.h
16122+++ b/arch/x86/include/asm/cache.h
16123@@ -5,12 +5,13 @@
16124
16125 /* L1 cache line size */
16126 #define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT)
16127-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
16128+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
16129
16130 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
16131+#define __read_only __attribute__((__section__(".data..read_only")))
16132
16133 #define INTERNODE_CACHE_SHIFT CONFIG_X86_INTERNODE_CACHE_SHIFT
16134-#define INTERNODE_CACHE_BYTES (1 << INTERNODE_CACHE_SHIFT)
16135+#define INTERNODE_CACHE_BYTES (_AC(1,UL) << INTERNODE_CACHE_SHIFT)
16136
16137 #ifdef CONFIG_X86_VSMP
16138 #ifdef CONFIG_SMP
16139diff --git a/arch/x86/include/asm/calling.h b/arch/x86/include/asm/calling.h
16140index 1f1297b..72b8439 100644
16141--- a/arch/x86/include/asm/calling.h
16142+++ b/arch/x86/include/asm/calling.h
16143@@ -82,106 +82,117 @@ For 32-bit we have the following conventions - kernel is built with
16144 #define RSP 152
16145 #define SS 160
16146
16147-#define ARGOFFSET R11
16148+#define ARGOFFSET R15
16149
16150 .macro SAVE_ARGS addskip=0, save_rcx=1, save_r891011=1, rax_enosys=0
16151- subq $9*8+\addskip, %rsp
16152- CFI_ADJUST_CFA_OFFSET 9*8+\addskip
16153- movq_cfi rdi, 8*8
16154- movq_cfi rsi, 7*8
16155- movq_cfi rdx, 6*8
16156+ subq $ORIG_RAX-ARGOFFSET+\addskip, %rsp
16157+ CFI_ADJUST_CFA_OFFSET ORIG_RAX-ARGOFFSET+\addskip
16158+ movq_cfi rdi, RDI
16159+ movq_cfi rsi, RSI
16160+ movq_cfi rdx, RDX
16161
16162 .if \save_rcx
16163- movq_cfi rcx, 5*8
16164+ movq_cfi rcx, RCX
16165 .endif
16166
16167 .if \rax_enosys
16168- movq $-ENOSYS, 4*8(%rsp)
16169+ movq $-ENOSYS, RAX(%rsp)
16170 .else
16171- movq_cfi rax, 4*8
16172+ movq_cfi rax, RAX
16173 .endif
16174
16175 .if \save_r891011
16176- movq_cfi r8, 3*8
16177- movq_cfi r9, 2*8
16178- movq_cfi r10, 1*8
16179- movq_cfi r11, 0*8
16180+ movq_cfi r8, R8
16181+ movq_cfi r9, R9
16182+ movq_cfi r10, R10
16183+ movq_cfi r11, R11
16184 .endif
16185
16186+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
16187+ movq_cfi r12, R12
16188+#endif
16189+
16190 .endm
16191
16192-#define ARG_SKIP (9*8)
16193+#define ARG_SKIP ORIG_RAX
16194
16195 .macro RESTORE_ARGS rstor_rax=1, addskip=0, rstor_rcx=1, rstor_r11=1, \
16196 rstor_r8910=1, rstor_rdx=1
16197+
16198+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
16199+ movq_cfi_restore R12, r12
16200+#endif
16201+
16202 .if \rstor_r11
16203- movq_cfi_restore 0*8, r11
16204+ movq_cfi_restore R11, r11
16205 .endif
16206
16207 .if \rstor_r8910
16208- movq_cfi_restore 1*8, r10
16209- movq_cfi_restore 2*8, r9
16210- movq_cfi_restore 3*8, r8
16211+ movq_cfi_restore R10, r10
16212+ movq_cfi_restore R9, r9
16213+ movq_cfi_restore R8, r8
16214 .endif
16215
16216 .if \rstor_rax
16217- movq_cfi_restore 4*8, rax
16218+ movq_cfi_restore RAX, rax
16219 .endif
16220
16221 .if \rstor_rcx
16222- movq_cfi_restore 5*8, rcx
16223+ movq_cfi_restore RCX, rcx
16224 .endif
16225
16226 .if \rstor_rdx
16227- movq_cfi_restore 6*8, rdx
16228+ movq_cfi_restore RDX, rdx
16229 .endif
16230
16231- movq_cfi_restore 7*8, rsi
16232- movq_cfi_restore 8*8, rdi
16233+ movq_cfi_restore RSI, rsi
16234+ movq_cfi_restore RDI, rdi
16235
16236- .if ARG_SKIP+\addskip > 0
16237- addq $ARG_SKIP+\addskip, %rsp
16238- CFI_ADJUST_CFA_OFFSET -(ARG_SKIP+\addskip)
16239+ .if ORIG_RAX+\addskip > 0
16240+ addq $ORIG_RAX+\addskip, %rsp
16241+ CFI_ADJUST_CFA_OFFSET -(ORIG_RAX+\addskip)
16242 .endif
16243 .endm
16244
16245- .macro LOAD_ARGS offset, skiprax=0
16246- movq \offset(%rsp), %r11
16247- movq \offset+8(%rsp), %r10
16248- movq \offset+16(%rsp), %r9
16249- movq \offset+24(%rsp), %r8
16250- movq \offset+40(%rsp), %rcx
16251- movq \offset+48(%rsp), %rdx
16252- movq \offset+56(%rsp), %rsi
16253- movq \offset+64(%rsp), %rdi
16254+ .macro LOAD_ARGS skiprax=0
16255+ movq R11(%rsp), %r11
16256+ movq R10(%rsp), %r10
16257+ movq R9(%rsp), %r9
16258+ movq R8(%rsp), %r8
16259+ movq RCX(%rsp), %rcx
16260+ movq RDX(%rsp), %rdx
16261+ movq RSI(%rsp), %rsi
16262+ movq RDI(%rsp), %rdi
16263 .if \skiprax
16264 .else
16265- movq \offset+72(%rsp), %rax
16266+ movq ORIG_RAX(%rsp), %rax
16267 .endif
16268 .endm
16269
16270-#define REST_SKIP (6*8)
16271-
16272 .macro SAVE_REST
16273- subq $REST_SKIP, %rsp
16274- CFI_ADJUST_CFA_OFFSET REST_SKIP
16275- movq_cfi rbx, 5*8
16276- movq_cfi rbp, 4*8
16277- movq_cfi r12, 3*8
16278- movq_cfi r13, 2*8
16279- movq_cfi r14, 1*8
16280- movq_cfi r15, 0*8
16281+ movq_cfi rbx, RBX
16282+ movq_cfi rbp, RBP
16283+
16284+#ifndef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
16285+ movq_cfi r12, R12
16286+#endif
16287+
16288+ movq_cfi r13, R13
16289+ movq_cfi r14, R14
16290+ movq_cfi r15, R15
16291 .endm
16292
16293 .macro RESTORE_REST
16294- movq_cfi_restore 0*8, r15
16295- movq_cfi_restore 1*8, r14
16296- movq_cfi_restore 2*8, r13
16297- movq_cfi_restore 3*8, r12
16298- movq_cfi_restore 4*8, rbp
16299- movq_cfi_restore 5*8, rbx
16300- addq $REST_SKIP, %rsp
16301- CFI_ADJUST_CFA_OFFSET -(REST_SKIP)
16302+ movq_cfi_restore R15, r15
16303+ movq_cfi_restore R14, r14
16304+ movq_cfi_restore R13, r13
16305+
16306+#ifndef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
16307+ movq_cfi_restore R12, r12
16308+#endif
16309+
16310+ movq_cfi_restore RBP, rbp
16311+ movq_cfi_restore RBX, rbx
16312 .endm
16313
16314 .macro SAVE_ALL
16315diff --git a/arch/x86/include/asm/checksum_32.h b/arch/x86/include/asm/checksum_32.h
16316index f50de69..2b0a458 100644
16317--- a/arch/x86/include/asm/checksum_32.h
16318+++ b/arch/x86/include/asm/checksum_32.h
16319@@ -31,6 +31,14 @@ asmlinkage __wsum csum_partial_copy_generic(const void *src, void *dst,
16320 int len, __wsum sum,
16321 int *src_err_ptr, int *dst_err_ptr);
16322
16323+asmlinkage __wsum csum_partial_copy_generic_to_user(const void *src, void *dst,
16324+ int len, __wsum sum,
16325+ int *src_err_ptr, int *dst_err_ptr);
16326+
16327+asmlinkage __wsum csum_partial_copy_generic_from_user(const void *src, void *dst,
16328+ int len, __wsum sum,
16329+ int *src_err_ptr, int *dst_err_ptr);
16330+
16331 /*
16332 * Note: when you get a NULL pointer exception here this means someone
16333 * passed in an incorrect kernel address to one of these functions.
16334@@ -53,7 +61,7 @@ static inline __wsum csum_partial_copy_from_user(const void __user *src,
16335
16336 might_sleep();
16337 stac();
16338- ret = csum_partial_copy_generic((__force void *)src, dst,
16339+ ret = csum_partial_copy_generic_from_user((__force void *)src, dst,
16340 len, sum, err_ptr, NULL);
16341 clac();
16342
16343@@ -187,7 +195,7 @@ static inline __wsum csum_and_copy_to_user(const void *src,
16344 might_sleep();
16345 if (access_ok(VERIFY_WRITE, dst, len)) {
16346 stac();
16347- ret = csum_partial_copy_generic(src, (__force void *)dst,
16348+ ret = csum_partial_copy_generic_to_user(src, (__force void *)dst,
16349 len, sum, NULL, err_ptr);
16350 clac();
16351 return ret;
16352diff --git a/arch/x86/include/asm/cmpxchg.h b/arch/x86/include/asm/cmpxchg.h
16353index 99c105d7..2f667ac 100644
16354--- a/arch/x86/include/asm/cmpxchg.h
16355+++ b/arch/x86/include/asm/cmpxchg.h
16356@@ -16,8 +16,12 @@ extern void __cmpxchg_wrong_size(void)
16357 __compiletime_error("Bad argument size for cmpxchg");
16358 extern void __xadd_wrong_size(void)
16359 __compiletime_error("Bad argument size for xadd");
16360+extern void __xadd_check_overflow_wrong_size(void)
16361+ __compiletime_error("Bad argument size for xadd_check_overflow");
16362 extern void __add_wrong_size(void)
16363 __compiletime_error("Bad argument size for add");
16364+extern void __add_check_overflow_wrong_size(void)
16365+ __compiletime_error("Bad argument size for add_check_overflow");
16366
16367 /*
16368 * Constants for operation sizes. On 32-bit, the 64-bit size it set to
16369@@ -69,6 +73,38 @@ extern void __add_wrong_size(void)
16370 __ret; \
16371 })
16372
16373+#ifdef CONFIG_PAX_REFCOUNT
16374+#define __xchg_op_check_overflow(ptr, arg, op, lock) \
16375+ ({ \
16376+ __typeof__ (*(ptr)) __ret = (arg); \
16377+ switch (sizeof(*(ptr))) { \
16378+ case __X86_CASE_L: \
16379+ asm volatile (lock #op "l %0, %1\n" \
16380+ "jno 0f\n" \
16381+ "mov %0,%1\n" \
16382+ "int $4\n0:\n" \
16383+ _ASM_EXTABLE(0b, 0b) \
16384+ : "+r" (__ret), "+m" (*(ptr)) \
16385+ : : "memory", "cc"); \
16386+ break; \
16387+ case __X86_CASE_Q: \
16388+ asm volatile (lock #op "q %q0, %1\n" \
16389+ "jno 0f\n" \
16390+ "mov %0,%1\n" \
16391+ "int $4\n0:\n" \
16392+ _ASM_EXTABLE(0b, 0b) \
16393+ : "+r" (__ret), "+m" (*(ptr)) \
16394+ : : "memory", "cc"); \
16395+ break; \
16396+ default: \
16397+ __ ## op ## _check_overflow_wrong_size(); \
16398+ } \
16399+ __ret; \
16400+ })
16401+#else
16402+#define __xchg_op_check_overflow(ptr, arg, op, lock) __xchg_op(ptr, arg, op, lock)
16403+#endif
16404+
16405 /*
16406 * Note: no "lock" prefix even on SMP: xchg always implies lock anyway.
16407 * Since this is generally used to protect other memory information, we
16408@@ -167,6 +203,9 @@ extern void __add_wrong_size(void)
16409 #define xadd_sync(ptr, inc) __xadd((ptr), (inc), "lock; ")
16410 #define xadd_local(ptr, inc) __xadd((ptr), (inc), "")
16411
16412+#define __xadd_check_overflow(ptr, inc, lock) __xchg_op_check_overflow((ptr), (inc), xadd, lock)
16413+#define xadd_check_overflow(ptr, inc) __xadd_check_overflow((ptr), (inc), LOCK_PREFIX)
16414+
16415 #define __add(ptr, inc, lock) \
16416 ({ \
16417 __typeof__ (*(ptr)) __ret = (inc); \
16418diff --git a/arch/x86/include/asm/compat.h b/arch/x86/include/asm/compat.h
16419index 59c6c40..5e0b22c 100644
16420--- a/arch/x86/include/asm/compat.h
16421+++ b/arch/x86/include/asm/compat.h
16422@@ -41,7 +41,7 @@ typedef s64 __attribute__((aligned(4))) compat_s64;
16423 typedef u32 compat_uint_t;
16424 typedef u32 compat_ulong_t;
16425 typedef u64 __attribute__((aligned(4))) compat_u64;
16426-typedef u32 compat_uptr_t;
16427+typedef u32 __user compat_uptr_t;
16428
16429 struct compat_timespec {
16430 compat_time_t tv_sec;
16431diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h
16432index 90a5485..43b6211 100644
16433--- a/arch/x86/include/asm/cpufeature.h
16434+++ b/arch/x86/include/asm/cpufeature.h
16435@@ -213,7 +213,7 @@
16436 #define X86_FEATURE_PAUSEFILTER ( 8*32+13) /* AMD filtered pause intercept */
16437 #define X86_FEATURE_PFTHRESHOLD ( 8*32+14) /* AMD pause filter threshold */
16438 #define X86_FEATURE_VMMCALL ( 8*32+15) /* Prefer vmmcall to vmcall */
16439-
16440+#define X86_FEATURE_STRONGUDEREF (8*32+31) /* PaX PCID based strong UDEREF */
16441
16442 /* Intel-defined CPU features, CPUID level 0x00000007:0 (ebx), word 9 */
16443 #define X86_FEATURE_FSGSBASE ( 9*32+ 0) /* {RD/WR}{FS/GS}BASE instructions*/
16444@@ -221,7 +221,7 @@
16445 #define X86_FEATURE_BMI1 ( 9*32+ 3) /* 1st group bit manipulation extensions */
16446 #define X86_FEATURE_HLE ( 9*32+ 4) /* Hardware Lock Elision */
16447 #define X86_FEATURE_AVX2 ( 9*32+ 5) /* AVX2 instructions */
16448-#define X86_FEATURE_SMEP ( 9*32+ 7) /* Supervisor Mode Execution Protection */
16449+#define X86_FEATURE_SMEP ( 9*32+ 7) /* Supervisor Mode Execution Prevention */
16450 #define X86_FEATURE_BMI2 ( 9*32+ 8) /* 2nd group bit manipulation extensions */
16451 #define X86_FEATURE_ERMS ( 9*32+ 9) /* Enhanced REP MOVSB/STOSB */
16452 #define X86_FEATURE_INVPCID ( 9*32+10) /* Invalidate Processor Context ID */
16453@@ -390,6 +390,7 @@ extern const char * const x86_bug_flags[NBUGINTS*32];
16454 #define cpu_has_eager_fpu boot_cpu_has(X86_FEATURE_EAGER_FPU)
16455 #define cpu_has_topoext boot_cpu_has(X86_FEATURE_TOPOEXT)
16456 #define cpu_has_bpext boot_cpu_has(X86_FEATURE_BPEXT)
16457+#define cpu_has_pcid boot_cpu_has(X86_FEATURE_PCID)
16458
16459 #if __GNUC__ >= 4
16460 extern void warn_pre_alternatives(void);
16461@@ -441,7 +442,8 @@ static __always_inline __pure bool __static_cpu_has(u16 bit)
16462
16463 #ifdef CONFIG_X86_DEBUG_STATIC_CPU_HAS
16464 t_warn:
16465- warn_pre_alternatives();
16466+ if (bit != X86_FEATURE_PCID && bit != X86_FEATURE_INVPCID)
16467+ warn_pre_alternatives();
16468 return false;
16469 #endif
16470
16471@@ -461,7 +463,7 @@ static __always_inline __pure bool __static_cpu_has(u16 bit)
16472 ".section .discard,\"aw\",@progbits\n"
16473 " .byte 0xff + (4f-3f) - (2b-1b)\n" /* size check */
16474 ".previous\n"
16475- ".section .altinstr_replacement,\"ax\"\n"
16476+ ".section .altinstr_replacement,\"a\"\n"
16477 "3: movb $1,%0\n"
16478 "4:\n"
16479 ".previous\n"
16480@@ -498,7 +500,7 @@ static __always_inline __pure bool _static_cpu_has_safe(u16 bit)
16481 " .byte 2b - 1b\n" /* src len */
16482 " .byte 4f - 3f\n" /* repl len */
16483 ".previous\n"
16484- ".section .altinstr_replacement,\"ax\"\n"
16485+ ".section .altinstr_replacement,\"a\"\n"
16486 "3: .byte 0xe9\n .long %l[t_no] - 2b\n"
16487 "4:\n"
16488 ".previous\n"
16489@@ -531,7 +533,7 @@ static __always_inline __pure bool _static_cpu_has_safe(u16 bit)
16490 ".section .discard,\"aw\",@progbits\n"
16491 " .byte 0xff + (4f-3f) - (2b-1b)\n" /* size check */
16492 ".previous\n"
16493- ".section .altinstr_replacement,\"ax\"\n"
16494+ ".section .altinstr_replacement,\"a\"\n"
16495 "3: movb $0,%0\n"
16496 "4:\n"
16497 ".previous\n"
16498@@ -545,7 +547,7 @@ static __always_inline __pure bool _static_cpu_has_safe(u16 bit)
16499 ".section .discard,\"aw\",@progbits\n"
16500 " .byte 0xff + (6f-5f) - (4b-3b)\n" /* size check */
16501 ".previous\n"
16502- ".section .altinstr_replacement,\"ax\"\n"
16503+ ".section .altinstr_replacement,\"a\"\n"
16504 "5: movb $1,%0\n"
16505 "6:\n"
16506 ".previous\n"
16507diff --git a/arch/x86/include/asm/desc.h b/arch/x86/include/asm/desc.h
16508index a94b82e..59ecefa 100644
16509--- a/arch/x86/include/asm/desc.h
16510+++ b/arch/x86/include/asm/desc.h
16511@@ -4,6 +4,7 @@
16512 #include <asm/desc_defs.h>
16513 #include <asm/ldt.h>
16514 #include <asm/mmu.h>
16515+#include <asm/pgtable.h>
16516
16517 #include <linux/smp.h>
16518 #include <linux/percpu.h>
16519@@ -17,6 +18,7 @@ static inline void fill_ldt(struct desc_struct *desc, const struct user_desc *in
16520
16521 desc->type = (info->read_exec_only ^ 1) << 1;
16522 desc->type |= info->contents << 2;
16523+ desc->type |= info->seg_not_present ^ 1;
16524
16525 desc->s = 1;
16526 desc->dpl = 0x3;
16527@@ -35,19 +37,14 @@ static inline void fill_ldt(struct desc_struct *desc, const struct user_desc *in
16528 }
16529
16530 extern struct desc_ptr idt_descr;
16531-extern gate_desc idt_table[];
16532-extern struct desc_ptr debug_idt_descr;
16533-extern gate_desc debug_idt_table[];
16534-
16535-struct gdt_page {
16536- struct desc_struct gdt[GDT_ENTRIES];
16537-} __attribute__((aligned(PAGE_SIZE)));
16538-
16539-DECLARE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page);
16540+extern gate_desc idt_table[IDT_ENTRIES];
16541+extern const struct desc_ptr debug_idt_descr;
16542+extern gate_desc debug_idt_table[IDT_ENTRIES];
16543
16544+extern struct desc_struct cpu_gdt_table[NR_CPUS][PAGE_SIZE / sizeof(struct desc_struct)];
16545 static inline struct desc_struct *get_cpu_gdt_table(unsigned int cpu)
16546 {
16547- return per_cpu(gdt_page, cpu).gdt;
16548+ return cpu_gdt_table[cpu];
16549 }
16550
16551 #ifdef CONFIG_X86_64
16552@@ -72,8 +69,14 @@ static inline void pack_gate(gate_desc *gate, unsigned char type,
16553 unsigned long base, unsigned dpl, unsigned flags,
16554 unsigned short seg)
16555 {
16556- gate->a = (seg << 16) | (base & 0xffff);
16557- gate->b = (base & 0xffff0000) | (((0x80 | type | (dpl << 5)) & 0xff) << 8);
16558+ gate->gate.offset_low = base;
16559+ gate->gate.seg = seg;
16560+ gate->gate.reserved = 0;
16561+ gate->gate.type = type;
16562+ gate->gate.s = 0;
16563+ gate->gate.dpl = dpl;
16564+ gate->gate.p = 1;
16565+ gate->gate.offset_high = base >> 16;
16566 }
16567
16568 #endif
16569@@ -118,12 +121,16 @@ static inline void paravirt_free_ldt(struct desc_struct *ldt, unsigned entries)
16570
16571 static inline void native_write_idt_entry(gate_desc *idt, int entry, const gate_desc *gate)
16572 {
16573+ pax_open_kernel();
16574 memcpy(&idt[entry], gate, sizeof(*gate));
16575+ pax_close_kernel();
16576 }
16577
16578 static inline void native_write_ldt_entry(struct desc_struct *ldt, int entry, const void *desc)
16579 {
16580+ pax_open_kernel();
16581 memcpy(&ldt[entry], desc, 8);
16582+ pax_close_kernel();
16583 }
16584
16585 static inline void
16586@@ -137,7 +144,9 @@ native_write_gdt_entry(struct desc_struct *gdt, int entry, const void *desc, int
16587 default: size = sizeof(*gdt); break;
16588 }
16589
16590+ pax_open_kernel();
16591 memcpy(&gdt[entry], desc, size);
16592+ pax_close_kernel();
16593 }
16594
16595 static inline void pack_descriptor(struct desc_struct *desc, unsigned long base,
16596@@ -210,7 +219,9 @@ static inline void native_set_ldt(const void *addr, unsigned int entries)
16597
16598 static inline void native_load_tr_desc(void)
16599 {
16600+ pax_open_kernel();
16601 asm volatile("ltr %w0"::"q" (GDT_ENTRY_TSS*8));
16602+ pax_close_kernel();
16603 }
16604
16605 static inline void native_load_gdt(const struct desc_ptr *dtr)
16606@@ -247,8 +258,10 @@ static inline void native_load_tls(struct thread_struct *t, unsigned int cpu)
16607 struct desc_struct *gdt = get_cpu_gdt_table(cpu);
16608 unsigned int i;
16609
16610+ pax_open_kernel();
16611 for (i = 0; i < GDT_ENTRY_TLS_ENTRIES; i++)
16612 gdt[GDT_ENTRY_TLS_MIN + i] = t->tls_array[i];
16613+ pax_close_kernel();
16614 }
16615
16616 /* This intentionally ignores lm, since 32-bit apps don't have that field. */
16617@@ -295,7 +308,7 @@ static inline void load_LDT(mm_context_t *pc)
16618 preempt_enable();
16619 }
16620
16621-static inline unsigned long get_desc_base(const struct desc_struct *desc)
16622+static inline unsigned long __intentional_overflow(-1) get_desc_base(const struct desc_struct *desc)
16623 {
16624 return (unsigned)(desc->base0 | ((desc->base1) << 16) | ((desc->base2) << 24));
16625 }
16626@@ -319,7 +332,7 @@ static inline void set_desc_limit(struct desc_struct *desc, unsigned long limit)
16627 }
16628
16629 #ifdef CONFIG_X86_64
16630-static inline void set_nmi_gate(int gate, void *addr)
16631+static inline void set_nmi_gate(int gate, const void *addr)
16632 {
16633 gate_desc s;
16634
16635@@ -329,14 +342,14 @@ static inline void set_nmi_gate(int gate, void *addr)
16636 #endif
16637
16638 #ifdef CONFIG_TRACING
16639-extern struct desc_ptr trace_idt_descr;
16640-extern gate_desc trace_idt_table[];
16641+extern const struct desc_ptr trace_idt_descr;
16642+extern gate_desc trace_idt_table[IDT_ENTRIES];
16643 static inline void write_trace_idt_entry(int entry, const gate_desc *gate)
16644 {
16645 write_idt_entry(trace_idt_table, entry, gate);
16646 }
16647
16648-static inline void _trace_set_gate(int gate, unsigned type, void *addr,
16649+static inline void _trace_set_gate(int gate, unsigned type, const void *addr,
16650 unsigned dpl, unsigned ist, unsigned seg)
16651 {
16652 gate_desc s;
16653@@ -356,7 +369,7 @@ static inline void write_trace_idt_entry(int entry, const gate_desc *gate)
16654 #define _trace_set_gate(gate, type, addr, dpl, ist, seg)
16655 #endif
16656
16657-static inline void _set_gate(int gate, unsigned type, void *addr,
16658+static inline void _set_gate(int gate, unsigned type, const void *addr,
16659 unsigned dpl, unsigned ist, unsigned seg)
16660 {
16661 gate_desc s;
16662@@ -379,9 +392,9 @@ static inline void _set_gate(int gate, unsigned type, void *addr,
16663 #define set_intr_gate(n, addr) \
16664 do { \
16665 BUG_ON((unsigned)n > 0xFF); \
16666- _set_gate(n, GATE_INTERRUPT, (void *)addr, 0, 0, \
16667+ _set_gate(n, GATE_INTERRUPT, (const void *)addr, 0, 0, \
16668 __KERNEL_CS); \
16669- _trace_set_gate(n, GATE_INTERRUPT, (void *)trace_##addr,\
16670+ _trace_set_gate(n, GATE_INTERRUPT, (const void *)trace_##addr,\
16671 0, 0, __KERNEL_CS); \
16672 } while (0)
16673
16674@@ -409,19 +422,19 @@ static inline void alloc_system_vector(int vector)
16675 /*
16676 * This routine sets up an interrupt gate at directory privilege level 3.
16677 */
16678-static inline void set_system_intr_gate(unsigned int n, void *addr)
16679+static inline void set_system_intr_gate(unsigned int n, const void *addr)
16680 {
16681 BUG_ON((unsigned)n > 0xFF);
16682 _set_gate(n, GATE_INTERRUPT, addr, 0x3, 0, __KERNEL_CS);
16683 }
16684
16685-static inline void set_system_trap_gate(unsigned int n, void *addr)
16686+static inline void set_system_trap_gate(unsigned int n, const void *addr)
16687 {
16688 BUG_ON((unsigned)n > 0xFF);
16689 _set_gate(n, GATE_TRAP, addr, 0x3, 0, __KERNEL_CS);
16690 }
16691
16692-static inline void set_trap_gate(unsigned int n, void *addr)
16693+static inline void set_trap_gate(unsigned int n, const void *addr)
16694 {
16695 BUG_ON((unsigned)n > 0xFF);
16696 _set_gate(n, GATE_TRAP, addr, 0, 0, __KERNEL_CS);
16697@@ -430,16 +443,16 @@ static inline void set_trap_gate(unsigned int n, void *addr)
16698 static inline void set_task_gate(unsigned int n, unsigned int gdt_entry)
16699 {
16700 BUG_ON((unsigned)n > 0xFF);
16701- _set_gate(n, GATE_TASK, (void *)0, 0, 0, (gdt_entry<<3));
16702+ _set_gate(n, GATE_TASK, (const void *)0, 0, 0, (gdt_entry<<3));
16703 }
16704
16705-static inline void set_intr_gate_ist(int n, void *addr, unsigned ist)
16706+static inline void set_intr_gate_ist(int n, const void *addr, unsigned ist)
16707 {
16708 BUG_ON((unsigned)n > 0xFF);
16709 _set_gate(n, GATE_INTERRUPT, addr, 0, ist, __KERNEL_CS);
16710 }
16711
16712-static inline void set_system_intr_gate_ist(int n, void *addr, unsigned ist)
16713+static inline void set_system_intr_gate_ist(int n, const void *addr, unsigned ist)
16714 {
16715 BUG_ON((unsigned)n > 0xFF);
16716 _set_gate(n, GATE_INTERRUPT, addr, 0x3, ist, __KERNEL_CS);
16717@@ -511,4 +524,17 @@ static inline void load_current_idt(void)
16718 else
16719 load_idt((const struct desc_ptr *)&idt_descr);
16720 }
16721+
16722+#ifdef CONFIG_X86_32
16723+static inline void set_user_cs(unsigned long base, unsigned long limit, int cpu)
16724+{
16725+ struct desc_struct d;
16726+
16727+ if (likely(limit))
16728+ limit = (limit - 1UL) >> PAGE_SHIFT;
16729+ pack_descriptor(&d, base, limit, 0xFB, 0xC);
16730+ write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_DEFAULT_USER_CS, &d, DESCTYPE_S);
16731+}
16732+#endif
16733+
16734 #endif /* _ASM_X86_DESC_H */
16735diff --git a/arch/x86/include/asm/desc_defs.h b/arch/x86/include/asm/desc_defs.h
16736index 278441f..b95a174 100644
16737--- a/arch/x86/include/asm/desc_defs.h
16738+++ b/arch/x86/include/asm/desc_defs.h
16739@@ -31,6 +31,12 @@ struct desc_struct {
16740 unsigned base1: 8, type: 4, s: 1, dpl: 2, p: 1;
16741 unsigned limit: 4, avl: 1, l: 1, d: 1, g: 1, base2: 8;
16742 };
16743+ struct {
16744+ u16 offset_low;
16745+ u16 seg;
16746+ unsigned reserved: 8, type: 4, s: 1, dpl: 2, p: 1;
16747+ unsigned offset_high: 16;
16748+ } gate;
16749 };
16750 } __attribute__((packed));
16751
16752diff --git a/arch/x86/include/asm/div64.h b/arch/x86/include/asm/div64.h
16753index ced283a..ffe04cc 100644
16754--- a/arch/x86/include/asm/div64.h
16755+++ b/arch/x86/include/asm/div64.h
16756@@ -39,7 +39,7 @@
16757 __mod; \
16758 })
16759
16760-static inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
16761+static inline u64 __intentional_overflow(-1) div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
16762 {
16763 union {
16764 u64 v64;
16765diff --git a/arch/x86/include/asm/elf.h b/arch/x86/include/asm/elf.h
16766index ca3347a..1a5082a 100644
16767--- a/arch/x86/include/asm/elf.h
16768+++ b/arch/x86/include/asm/elf.h
16769@@ -75,9 +75,6 @@ typedef struct user_fxsr_struct elf_fpxregset_t;
16770
16771 #include <asm/vdso.h>
16772
16773-#ifdef CONFIG_X86_64
16774-extern unsigned int vdso64_enabled;
16775-#endif
16776 #if defined(CONFIG_X86_32) || defined(CONFIG_COMPAT)
16777 extern unsigned int vdso32_enabled;
16778 #endif
16779@@ -249,7 +246,25 @@ extern int force_personality32;
16780 the loader. We need to make sure that it is out of the way of the program
16781 that it will "exec", and that there is sufficient room for the brk. */
16782
16783+#ifdef CONFIG_PAX_SEGMEXEC
16784+#define ELF_ET_DYN_BASE ((current->mm->pax_flags & MF_PAX_SEGMEXEC) ? SEGMEXEC_TASK_SIZE/3*2 : TASK_SIZE/3*2)
16785+#else
16786 #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
16787+#endif
16788+
16789+#ifdef CONFIG_PAX_ASLR
16790+#ifdef CONFIG_X86_32
16791+#define PAX_ELF_ET_DYN_BASE 0x10000000UL
16792+
16793+#define PAX_DELTA_MMAP_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
16794+#define PAX_DELTA_STACK_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
16795+#else
16796+#define PAX_ELF_ET_DYN_BASE 0x400000UL
16797+
16798+#define PAX_DELTA_MMAP_LEN ((test_thread_flag(TIF_ADDR32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
16799+#define PAX_DELTA_STACK_LEN ((test_thread_flag(TIF_ADDR32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
16800+#endif
16801+#endif
16802
16803 /* This yields a mask that user programs can use to figure out what
16804 instruction set this CPU supports. This could be done in user space,
16805@@ -298,17 +313,13 @@ do { \
16806
16807 #define ARCH_DLINFO \
16808 do { \
16809- if (vdso64_enabled) \
16810- NEW_AUX_ENT(AT_SYSINFO_EHDR, \
16811- (unsigned long __force)current->mm->context.vdso); \
16812+ NEW_AUX_ENT(AT_SYSINFO_EHDR, current->mm->context.vdso); \
16813 } while (0)
16814
16815 /* As a historical oddity, the x32 and x86_64 vDSOs are controlled together. */
16816 #define ARCH_DLINFO_X32 \
16817 do { \
16818- if (vdso64_enabled) \
16819- NEW_AUX_ENT(AT_SYSINFO_EHDR, \
16820- (unsigned long __force)current->mm->context.vdso); \
16821+ NEW_AUX_ENT(AT_SYSINFO_EHDR, current->mm->context.vdso); \
16822 } while (0)
16823
16824 #define AT_SYSINFO 32
16825@@ -323,10 +334,10 @@ else \
16826
16827 #endif /* !CONFIG_X86_32 */
16828
16829-#define VDSO_CURRENT_BASE ((unsigned long)current->mm->context.vdso)
16830+#define VDSO_CURRENT_BASE (current->mm->context.vdso)
16831
16832 #define VDSO_ENTRY \
16833- ((unsigned long)current->mm->context.vdso + \
16834+ (current->mm->context.vdso + \
16835 selected_vdso32->sym___kernel_vsyscall)
16836
16837 struct linux_binprm;
16838@@ -338,9 +349,6 @@ extern int compat_arch_setup_additional_pages(struct linux_binprm *bprm,
16839 int uses_interp);
16840 #define compat_arch_setup_additional_pages compat_arch_setup_additional_pages
16841
16842-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
16843-#define arch_randomize_brk arch_randomize_brk
16844-
16845 /*
16846 * True on X86_32 or when emulating IA32 on X86_64
16847 */
16848diff --git a/arch/x86/include/asm/emergency-restart.h b/arch/x86/include/asm/emergency-restart.h
16849index 77a99ac..39ff7f5 100644
16850--- a/arch/x86/include/asm/emergency-restart.h
16851+++ b/arch/x86/include/asm/emergency-restart.h
16852@@ -1,6 +1,6 @@
16853 #ifndef _ASM_X86_EMERGENCY_RESTART_H
16854 #define _ASM_X86_EMERGENCY_RESTART_H
16855
16856-extern void machine_emergency_restart(void);
16857+extern void machine_emergency_restart(void) __noreturn;
16858
16859 #endif /* _ASM_X86_EMERGENCY_RESTART_H */
16860diff --git a/arch/x86/include/asm/floppy.h b/arch/x86/include/asm/floppy.h
16861index 1c7eefe..d0e4702 100644
16862--- a/arch/x86/include/asm/floppy.h
16863+++ b/arch/x86/include/asm/floppy.h
16864@@ -229,18 +229,18 @@ static struct fd_routine_l {
16865 int (*_dma_setup)(char *addr, unsigned long size, int mode, int io);
16866 } fd_routine[] = {
16867 {
16868- request_dma,
16869- free_dma,
16870- get_dma_residue,
16871- dma_mem_alloc,
16872- hard_dma_setup
16873+ ._request_dma = request_dma,
16874+ ._free_dma = free_dma,
16875+ ._get_dma_residue = get_dma_residue,
16876+ ._dma_mem_alloc = dma_mem_alloc,
16877+ ._dma_setup = hard_dma_setup
16878 },
16879 {
16880- vdma_request_dma,
16881- vdma_nop,
16882- vdma_get_dma_residue,
16883- vdma_mem_alloc,
16884- vdma_dma_setup
16885+ ._request_dma = vdma_request_dma,
16886+ ._free_dma = vdma_nop,
16887+ ._get_dma_residue = vdma_get_dma_residue,
16888+ ._dma_mem_alloc = vdma_mem_alloc,
16889+ ._dma_setup = vdma_dma_setup
16890 }
16891 };
16892
16893diff --git a/arch/x86/include/asm/fpu-internal.h b/arch/x86/include/asm/fpu-internal.h
16894index 72ba21a..79f3f66 100644
16895--- a/arch/x86/include/asm/fpu-internal.h
16896+++ b/arch/x86/include/asm/fpu-internal.h
16897@@ -124,8 +124,11 @@ static inline void sanitize_i387_state(struct task_struct *tsk)
16898 #define user_insn(insn, output, input...) \
16899 ({ \
16900 int err; \
16901+ pax_open_userland(); \
16902 asm volatile(ASM_STAC "\n" \
16903- "1:" #insn "\n\t" \
16904+ "1:" \
16905+ __copyuser_seg \
16906+ #insn "\n\t" \
16907 "2: " ASM_CLAC "\n" \
16908 ".section .fixup,\"ax\"\n" \
16909 "3: movl $-1,%[err]\n" \
16910@@ -134,6 +137,7 @@ static inline void sanitize_i387_state(struct task_struct *tsk)
16911 _ASM_EXTABLE(1b, 3b) \
16912 : [err] "=r" (err), output \
16913 : "0"(0), input); \
16914+ pax_close_userland(); \
16915 err; \
16916 })
16917
16918@@ -300,7 +304,7 @@ static inline int restore_fpu_checking(struct task_struct *tsk)
16919 "fnclex\n\t"
16920 "emms\n\t"
16921 "fildl %P[addr]" /* set F?P to defined value */
16922- : : [addr] "m" (tsk->thread.fpu.has_fpu));
16923+ : : [addr] "m" (init_tss[raw_smp_processor_id()].x86_tss.sp0));
16924 }
16925
16926 return fpu_restore_checking(&tsk->thread.fpu);
16927diff --git a/arch/x86/include/asm/futex.h b/arch/x86/include/asm/futex.h
16928index b4c1f54..e290c08 100644
16929--- a/arch/x86/include/asm/futex.h
16930+++ b/arch/x86/include/asm/futex.h
16931@@ -12,6 +12,7 @@
16932 #include <asm/smap.h>
16933
16934 #define __futex_atomic_op1(insn, ret, oldval, uaddr, oparg) \
16935+ typecheck(u32 __user *, uaddr); \
16936 asm volatile("\t" ASM_STAC "\n" \
16937 "1:\t" insn "\n" \
16938 "2:\t" ASM_CLAC "\n" \
16939@@ -20,15 +21,16 @@
16940 "\tjmp\t2b\n" \
16941 "\t.previous\n" \
16942 _ASM_EXTABLE(1b, 3b) \
16943- : "=r" (oldval), "=r" (ret), "+m" (*uaddr) \
16944+ : "=r" (oldval), "=r" (ret), "+m" (*(u32 __user *)____m(uaddr)) \
16945 : "i" (-EFAULT), "0" (oparg), "1" (0))
16946
16947 #define __futex_atomic_op2(insn, ret, oldval, uaddr, oparg) \
16948+ typecheck(u32 __user *, uaddr); \
16949 asm volatile("\t" ASM_STAC "\n" \
16950 "1:\tmovl %2, %0\n" \
16951 "\tmovl\t%0, %3\n" \
16952 "\t" insn "\n" \
16953- "2:\t" LOCK_PREFIX "cmpxchgl %3, %2\n" \
16954+ "2:\t" LOCK_PREFIX __copyuser_seg"cmpxchgl %3, %2\n" \
16955 "\tjnz\t1b\n" \
16956 "3:\t" ASM_CLAC "\n" \
16957 "\t.section .fixup,\"ax\"\n" \
16958@@ -38,7 +40,7 @@
16959 _ASM_EXTABLE(1b, 4b) \
16960 _ASM_EXTABLE(2b, 4b) \
16961 : "=&a" (oldval), "=&r" (ret), \
16962- "+m" (*uaddr), "=&r" (tem) \
16963+ "+m" (*(u32 __user *)____m(uaddr)), "=&r" (tem) \
16964 : "r" (oparg), "i" (-EFAULT), "1" (0))
16965
16966 static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
16967@@ -57,12 +59,13 @@ static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
16968
16969 pagefault_disable();
16970
16971+ pax_open_userland();
16972 switch (op) {
16973 case FUTEX_OP_SET:
16974- __futex_atomic_op1("xchgl %0, %2", ret, oldval, uaddr, oparg);
16975+ __futex_atomic_op1(__copyuser_seg"xchgl %0, %2", ret, oldval, uaddr, oparg);
16976 break;
16977 case FUTEX_OP_ADD:
16978- __futex_atomic_op1(LOCK_PREFIX "xaddl %0, %2", ret, oldval,
16979+ __futex_atomic_op1(LOCK_PREFIX __copyuser_seg"xaddl %0, %2", ret, oldval,
16980 uaddr, oparg);
16981 break;
16982 case FUTEX_OP_OR:
16983@@ -77,6 +80,7 @@ static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
16984 default:
16985 ret = -ENOSYS;
16986 }
16987+ pax_close_userland();
16988
16989 pagefault_enable();
16990
16991diff --git a/arch/x86/include/asm/hw_irq.h b/arch/x86/include/asm/hw_irq.h
16992index 9662290..49ca5e5 100644
16993--- a/arch/x86/include/asm/hw_irq.h
16994+++ b/arch/x86/include/asm/hw_irq.h
16995@@ -160,8 +160,8 @@ static inline void unlock_vector_lock(void) {}
16996 #endif /* CONFIG_X86_LOCAL_APIC */
16997
16998 /* Statistics */
16999-extern atomic_t irq_err_count;
17000-extern atomic_t irq_mis_count;
17001+extern atomic_unchecked_t irq_err_count;
17002+extern atomic_unchecked_t irq_mis_count;
17003
17004 /* EISA */
17005 extern void eisa_set_level_irq(unsigned int irq);
17006diff --git a/arch/x86/include/asm/i8259.h b/arch/x86/include/asm/i8259.h
17007index ccffa53..3c90c87 100644
17008--- a/arch/x86/include/asm/i8259.h
17009+++ b/arch/x86/include/asm/i8259.h
17010@@ -62,7 +62,7 @@ struct legacy_pic {
17011 void (*init)(int auto_eoi);
17012 int (*irq_pending)(unsigned int irq);
17013 void (*make_irq)(unsigned int irq);
17014-};
17015+} __do_const;
17016
17017 extern struct legacy_pic *legacy_pic;
17018 extern struct legacy_pic null_legacy_pic;
17019diff --git a/arch/x86/include/asm/io.h b/arch/x86/include/asm/io.h
17020index 34a5b93..27e40a6 100644
17021--- a/arch/x86/include/asm/io.h
17022+++ b/arch/x86/include/asm/io.h
17023@@ -52,12 +52,12 @@ static inline void name(type val, volatile void __iomem *addr) \
17024 "m" (*(volatile type __force *)addr) barrier); }
17025
17026 build_mmio_read(readb, "b", unsigned char, "=q", :"memory")
17027-build_mmio_read(readw, "w", unsigned short, "=r", :"memory")
17028-build_mmio_read(readl, "l", unsigned int, "=r", :"memory")
17029+build_mmio_read(__intentional_overflow(-1) readw, "w", unsigned short, "=r", :"memory")
17030+build_mmio_read(__intentional_overflow(-1) readl, "l", unsigned int, "=r", :"memory")
17031
17032 build_mmio_read(__readb, "b", unsigned char, "=q", )
17033-build_mmio_read(__readw, "w", unsigned short, "=r", )
17034-build_mmio_read(__readl, "l", unsigned int, "=r", )
17035+build_mmio_read(__intentional_overflow(-1) __readw, "w", unsigned short, "=r", )
17036+build_mmio_read(__intentional_overflow(-1) __readl, "l", unsigned int, "=r", )
17037
17038 build_mmio_write(writeb, "b", unsigned char, "q", :"memory")
17039 build_mmio_write(writew, "w", unsigned short, "r", :"memory")
17040@@ -113,7 +113,7 @@ build_mmio_write(writeq, "q", unsigned long, "r", :"memory")
17041 * this function
17042 */
17043
17044-static inline phys_addr_t virt_to_phys(volatile void *address)
17045+static inline phys_addr_t __intentional_overflow(-1) virt_to_phys(volatile void *address)
17046 {
17047 return __pa(address);
17048 }
17049@@ -189,7 +189,7 @@ static inline void __iomem *ioremap(resource_size_t offset, unsigned long size)
17050 return ioremap_nocache(offset, size);
17051 }
17052
17053-extern void iounmap(volatile void __iomem *addr);
17054+extern void iounmap(const volatile void __iomem *addr);
17055
17056 extern void set_iounmap_nonlazy(void);
17057
17058@@ -199,6 +199,17 @@ extern void set_iounmap_nonlazy(void);
17059
17060 #include <linux/vmalloc.h>
17061
17062+#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
17063+static inline int valid_phys_addr_range(unsigned long addr, size_t count)
17064+{
17065+ return ((addr + count + PAGE_SIZE - 1) >> PAGE_SHIFT) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
17066+}
17067+
17068+static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t count)
17069+{
17070+ return (pfn + (count >> PAGE_SHIFT)) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
17071+}
17072+
17073 /*
17074 * Convert a virtual cached pointer to an uncached pointer
17075 */
17076diff --git a/arch/x86/include/asm/irqflags.h b/arch/x86/include/asm/irqflags.h
17077index 0a8b519..80e7d5b 100644
17078--- a/arch/x86/include/asm/irqflags.h
17079+++ b/arch/x86/include/asm/irqflags.h
17080@@ -141,6 +141,11 @@ static inline notrace unsigned long arch_local_irq_save(void)
17081 sti; \
17082 sysexit
17083
17084+#define GET_CR0_INTO_RDI mov %cr0, %rdi
17085+#define SET_RDI_INTO_CR0 mov %rdi, %cr0
17086+#define GET_CR3_INTO_RDI mov %cr3, %rdi
17087+#define SET_RDI_INTO_CR3 mov %rdi, %cr3
17088+
17089 #else
17090 #define INTERRUPT_RETURN iret
17091 #define ENABLE_INTERRUPTS_SYSEXIT sti; sysexit
17092diff --git a/arch/x86/include/asm/kprobes.h b/arch/x86/include/asm/kprobes.h
17093index 4421b5d..8543006 100644
17094--- a/arch/x86/include/asm/kprobes.h
17095+++ b/arch/x86/include/asm/kprobes.h
17096@@ -37,13 +37,8 @@ typedef u8 kprobe_opcode_t;
17097 #define RELATIVEJUMP_SIZE 5
17098 #define RELATIVECALL_OPCODE 0xe8
17099 #define RELATIVE_ADDR_SIZE 4
17100-#define MAX_STACK_SIZE 64
17101-#define MIN_STACK_SIZE(ADDR) \
17102- (((MAX_STACK_SIZE) < (((unsigned long)current_thread_info()) + \
17103- THREAD_SIZE - (unsigned long)(ADDR))) \
17104- ? (MAX_STACK_SIZE) \
17105- : (((unsigned long)current_thread_info()) + \
17106- THREAD_SIZE - (unsigned long)(ADDR)))
17107+#define MAX_STACK_SIZE 64UL
17108+#define MIN_STACK_SIZE(ADDR) min(MAX_STACK_SIZE, current->thread.sp0 - (unsigned long)(ADDR))
17109
17110 #define flush_insn_slot(p) do { } while (0)
17111
17112diff --git a/arch/x86/include/asm/local.h b/arch/x86/include/asm/local.h
17113index 4ad6560..75c7bdd 100644
17114--- a/arch/x86/include/asm/local.h
17115+++ b/arch/x86/include/asm/local.h
17116@@ -10,33 +10,97 @@ typedef struct {
17117 atomic_long_t a;
17118 } local_t;
17119
17120+typedef struct {
17121+ atomic_long_unchecked_t a;
17122+} local_unchecked_t;
17123+
17124 #define LOCAL_INIT(i) { ATOMIC_LONG_INIT(i) }
17125
17126 #define local_read(l) atomic_long_read(&(l)->a)
17127+#define local_read_unchecked(l) atomic_long_read_unchecked(&(l)->a)
17128 #define local_set(l, i) atomic_long_set(&(l)->a, (i))
17129+#define local_set_unchecked(l, i) atomic_long_set_unchecked(&(l)->a, (i))
17130
17131 static inline void local_inc(local_t *l)
17132 {
17133- asm volatile(_ASM_INC "%0"
17134+ asm volatile(_ASM_INC "%0\n"
17135+
17136+#ifdef CONFIG_PAX_REFCOUNT
17137+ "jno 0f\n"
17138+ _ASM_DEC "%0\n"
17139+ "int $4\n0:\n"
17140+ _ASM_EXTABLE(0b, 0b)
17141+#endif
17142+
17143+ : "+m" (l->a.counter));
17144+}
17145+
17146+static inline void local_inc_unchecked(local_unchecked_t *l)
17147+{
17148+ asm volatile(_ASM_INC "%0\n"
17149 : "+m" (l->a.counter));
17150 }
17151
17152 static inline void local_dec(local_t *l)
17153 {
17154- asm volatile(_ASM_DEC "%0"
17155+ asm volatile(_ASM_DEC "%0\n"
17156+
17157+#ifdef CONFIG_PAX_REFCOUNT
17158+ "jno 0f\n"
17159+ _ASM_INC "%0\n"
17160+ "int $4\n0:\n"
17161+ _ASM_EXTABLE(0b, 0b)
17162+#endif
17163+
17164+ : "+m" (l->a.counter));
17165+}
17166+
17167+static inline void local_dec_unchecked(local_unchecked_t *l)
17168+{
17169+ asm volatile(_ASM_DEC "%0\n"
17170 : "+m" (l->a.counter));
17171 }
17172
17173 static inline void local_add(long i, local_t *l)
17174 {
17175- asm volatile(_ASM_ADD "%1,%0"
17176+ asm volatile(_ASM_ADD "%1,%0\n"
17177+
17178+#ifdef CONFIG_PAX_REFCOUNT
17179+ "jno 0f\n"
17180+ _ASM_SUB "%1,%0\n"
17181+ "int $4\n0:\n"
17182+ _ASM_EXTABLE(0b, 0b)
17183+#endif
17184+
17185+ : "+m" (l->a.counter)
17186+ : "ir" (i));
17187+}
17188+
17189+static inline void local_add_unchecked(long i, local_unchecked_t *l)
17190+{
17191+ asm volatile(_ASM_ADD "%1,%0\n"
17192 : "+m" (l->a.counter)
17193 : "ir" (i));
17194 }
17195
17196 static inline void local_sub(long i, local_t *l)
17197 {
17198- asm volatile(_ASM_SUB "%1,%0"
17199+ asm volatile(_ASM_SUB "%1,%0\n"
17200+
17201+#ifdef CONFIG_PAX_REFCOUNT
17202+ "jno 0f\n"
17203+ _ASM_ADD "%1,%0\n"
17204+ "int $4\n0:\n"
17205+ _ASM_EXTABLE(0b, 0b)
17206+#endif
17207+
17208+ : "+m" (l->a.counter)
17209+ : "ir" (i));
17210+}
17211+
17212+static inline void local_sub_unchecked(long i, local_unchecked_t *l)
17213+{
17214+ asm volatile(_ASM_SUB "%1,%0\n"
17215 : "+m" (l->a.counter)
17216 : "ir" (i));
17217 }
17218@@ -52,7 +116,7 @@ static inline void local_sub(long i, local_t *l)
17219 */
17220 static inline int local_sub_and_test(long i, local_t *l)
17221 {
17222- GEN_BINARY_RMWcc(_ASM_SUB, l->a.counter, "er", i, "%0", "e");
17223+ GEN_BINARY_RMWcc(_ASM_SUB, _ASM_ADD, l->a.counter, "er", i, "%0", "e");
17224 }
17225
17226 /**
17227@@ -65,7 +129,7 @@ static inline int local_sub_and_test(long i, local_t *l)
17228 */
17229 static inline int local_dec_and_test(local_t *l)
17230 {
17231- GEN_UNARY_RMWcc(_ASM_DEC, l->a.counter, "%0", "e");
17232+ GEN_UNARY_RMWcc(_ASM_DEC, _ASM_INC, l->a.counter, "%0", "e");
17233 }
17234
17235 /**
17236@@ -78,7 +142,7 @@ static inline int local_dec_and_test(local_t *l)
17237 */
17238 static inline int local_inc_and_test(local_t *l)
17239 {
17240- GEN_UNARY_RMWcc(_ASM_INC, l->a.counter, "%0", "e");
17241+ GEN_UNARY_RMWcc(_ASM_INC, _ASM_DEC, l->a.counter, "%0", "e");
17242 }
17243
17244 /**
17245@@ -92,7 +156,7 @@ static inline int local_inc_and_test(local_t *l)
17246 */
17247 static inline int local_add_negative(long i, local_t *l)
17248 {
17249- GEN_BINARY_RMWcc(_ASM_ADD, l->a.counter, "er", i, "%0", "s");
17250+ GEN_BINARY_RMWcc(_ASM_ADD, _ASM_SUB, l->a.counter, "er", i, "%0", "s");
17251 }
17252
17253 /**
17254@@ -105,6 +169,30 @@ static inline int local_add_negative(long i, local_t *l)
17255 static inline long local_add_return(long i, local_t *l)
17256 {
17257 long __i = i;
17258+ asm volatile(_ASM_XADD "%0, %1\n"
17259+
17260+#ifdef CONFIG_PAX_REFCOUNT
17261+ "jno 0f\n"
17262+ _ASM_MOV "%0,%1\n"
17263+ "int $4\n0:\n"
17264+ _ASM_EXTABLE(0b, 0b)
17265+#endif
17266+
17267+ : "+r" (i), "+m" (l->a.counter)
17268+ : : "memory");
17269+ return i + __i;
17270+}
17271+
17272+/**
17273+ * local_add_return_unchecked - add and return
17274+ * @i: integer value to add
17275+ * @l: pointer to type local_unchecked_t
17276+ *
17277+ * Atomically adds @i to @l and returns @i + @l
17278+ */
17279+static inline long local_add_return_unchecked(long i, local_unchecked_t *l)
17280+{
17281+ long __i = i;
17282 asm volatile(_ASM_XADD "%0, %1;"
17283 : "+r" (i), "+m" (l->a.counter)
17284 : : "memory");
17285@@ -121,6 +209,8 @@ static inline long local_sub_return(long i, local_t *l)
17286
17287 #define local_cmpxchg(l, o, n) \
17288 (cmpxchg_local(&((l)->a.counter), (o), (n)))
17289+#define local_cmpxchg_unchecked(l, o, n) \
17290+ (cmpxchg_local(&((l)->a.counter), (o), (n)))
17291 /* Always has a lock prefix */
17292 #define local_xchg(l, n) (xchg(&((l)->a.counter), (n)))
17293
17294diff --git a/arch/x86/include/asm/mman.h b/arch/x86/include/asm/mman.h
17295new file mode 100644
17296index 0000000..2bfd3ba
17297--- /dev/null
17298+++ b/arch/x86/include/asm/mman.h
17299@@ -0,0 +1,15 @@
17300+#ifndef _X86_MMAN_H
17301+#define _X86_MMAN_H
17302+
17303+#include <uapi/asm/mman.h>
17304+
17305+#ifdef __KERNEL__
17306+#ifndef __ASSEMBLY__
17307+#ifdef CONFIG_X86_32
17308+#define arch_mmap_check i386_mmap_check
17309+int i386_mmap_check(unsigned long addr, unsigned long len, unsigned long flags);
17310+#endif
17311+#endif
17312+#endif
17313+
17314+#endif /* X86_MMAN_H */
17315diff --git a/arch/x86/include/asm/mmu.h b/arch/x86/include/asm/mmu.h
17316index 09b9620..923aecd 100644
17317--- a/arch/x86/include/asm/mmu.h
17318+++ b/arch/x86/include/asm/mmu.h
17319@@ -9,7 +9,7 @@
17320 * we put the segment information here.
17321 */
17322 typedef struct {
17323- void *ldt;
17324+ struct desc_struct *ldt;
17325 int size;
17326
17327 #ifdef CONFIG_X86_64
17328@@ -18,7 +18,19 @@ typedef struct {
17329 #endif
17330
17331 struct mutex lock;
17332- void __user *vdso;
17333+ unsigned long vdso;
17334+
17335+#ifdef CONFIG_X86_32
17336+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
17337+ unsigned long user_cs_base;
17338+ unsigned long user_cs_limit;
17339+
17340+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
17341+ cpumask_t cpu_user_cs_mask;
17342+#endif
17343+
17344+#endif
17345+#endif
17346
17347 atomic_t perf_rdpmc_allowed; /* nonzero if rdpmc is allowed */
17348 } mm_context_t;
17349diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h
17350index 883f6b93..6869d96 100644
17351--- a/arch/x86/include/asm/mmu_context.h
17352+++ b/arch/x86/include/asm/mmu_context.h
17353@@ -42,6 +42,20 @@ void destroy_context(struct mm_struct *mm);
17354
17355 static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
17356 {
17357+
17358+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
17359+ if (!(static_cpu_has(X86_FEATURE_PCID))) {
17360+ unsigned int i;
17361+ pgd_t *pgd;
17362+
17363+ pax_open_kernel();
17364+ pgd = get_cpu_pgd(smp_processor_id(), kernel);
17365+ for (i = USER_PGD_PTRS; i < 2 * USER_PGD_PTRS; ++i)
17366+ set_pgd_batched(pgd+i, native_make_pgd(0));
17367+ pax_close_kernel();
17368+ }
17369+#endif
17370+
17371 #ifdef CONFIG_SMP
17372 if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
17373 this_cpu_write(cpu_tlbstate.state, TLBSTATE_LAZY);
17374@@ -52,16 +66,59 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
17375 struct task_struct *tsk)
17376 {
17377 unsigned cpu = smp_processor_id();
17378+#if defined(CONFIG_X86_32) && defined(CONFIG_SMP) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
17379+ int tlbstate = TLBSTATE_OK;
17380+#endif
17381
17382 if (likely(prev != next)) {
17383 #ifdef CONFIG_SMP
17384+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
17385+ tlbstate = this_cpu_read(cpu_tlbstate.state);
17386+#endif
17387 this_cpu_write(cpu_tlbstate.state, TLBSTATE_OK);
17388 this_cpu_write(cpu_tlbstate.active_mm, next);
17389 #endif
17390 cpumask_set_cpu(cpu, mm_cpumask(next));
17391
17392 /* Re-load page tables */
17393+#ifdef CONFIG_PAX_PER_CPU_PGD
17394+ pax_open_kernel();
17395+
17396+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
17397+ if (static_cpu_has(X86_FEATURE_PCID))
17398+ __clone_user_pgds(get_cpu_pgd(cpu, user), next->pgd);
17399+ else
17400+#endif
17401+
17402+ __clone_user_pgds(get_cpu_pgd(cpu, kernel), next->pgd);
17403+ __shadow_user_pgds(get_cpu_pgd(cpu, kernel) + USER_PGD_PTRS, next->pgd);
17404+ pax_close_kernel();
17405+ BUG_ON((__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL) != (read_cr3() & __PHYSICAL_MASK) && (__pa(get_cpu_pgd(cpu, user)) | PCID_USER) != (read_cr3() & __PHYSICAL_MASK));
17406+
17407+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
17408+ if (static_cpu_has(X86_FEATURE_PCID)) {
17409+ if (static_cpu_has(X86_FEATURE_INVPCID)) {
17410+ u64 descriptor[2];
17411+ descriptor[0] = PCID_USER;
17412+ asm volatile(__ASM_INVPCID : : "d"(&descriptor), "a"(INVPCID_SINGLE_CONTEXT) : "memory");
17413+ if (!static_cpu_has(X86_FEATURE_STRONGUDEREF)) {
17414+ descriptor[0] = PCID_KERNEL;
17415+ asm volatile(__ASM_INVPCID : : "d"(&descriptor), "a"(INVPCID_SINGLE_CONTEXT) : "memory");
17416+ }
17417+ } else {
17418+ write_cr3(__pa(get_cpu_pgd(cpu, user)) | PCID_USER);
17419+ if (static_cpu_has(X86_FEATURE_STRONGUDEREF))
17420+ write_cr3(__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL | PCID_NOFLUSH);
17421+ else
17422+ write_cr3(__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL);
17423+ }
17424+ } else
17425+#endif
17426+
17427+ load_cr3(get_cpu_pgd(cpu, kernel));
17428+#else
17429 load_cr3(next->pgd);
17430+#endif
17431 trace_tlb_flush(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL);
17432
17433 /* Stop flush ipis for the previous mm */
17434@@ -84,9 +141,67 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
17435 */
17436 if (unlikely(prev->context.ldt != next->context.ldt))
17437 load_LDT_nolock(&next->context);
17438+
17439+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
17440+ if (!(__supported_pte_mask & _PAGE_NX)) {
17441+ smp_mb__before_atomic();
17442+ cpu_clear(cpu, prev->context.cpu_user_cs_mask);
17443+ smp_mb__after_atomic();
17444+ cpu_set(cpu, next->context.cpu_user_cs_mask);
17445+ }
17446+#endif
17447+
17448+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
17449+ if (unlikely(prev->context.user_cs_base != next->context.user_cs_base ||
17450+ prev->context.user_cs_limit != next->context.user_cs_limit))
17451+ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
17452+#ifdef CONFIG_SMP
17453+ else if (unlikely(tlbstate != TLBSTATE_OK))
17454+ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
17455+#endif
17456+#endif
17457+
17458 }
17459+ else {
17460+
17461+#ifdef CONFIG_PAX_PER_CPU_PGD
17462+ pax_open_kernel();
17463+
17464+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
17465+ if (static_cpu_has(X86_FEATURE_PCID))
17466+ __clone_user_pgds(get_cpu_pgd(cpu, user), next->pgd);
17467+ else
17468+#endif
17469+
17470+ __clone_user_pgds(get_cpu_pgd(cpu, kernel), next->pgd);
17471+ __shadow_user_pgds(get_cpu_pgd(cpu, kernel) + USER_PGD_PTRS, next->pgd);
17472+ pax_close_kernel();
17473+ BUG_ON((__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL) != (read_cr3() & __PHYSICAL_MASK) && (__pa(get_cpu_pgd(cpu, user)) | PCID_USER) != (read_cr3() & __PHYSICAL_MASK));
17474+
17475+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
17476+ if (static_cpu_has(X86_FEATURE_PCID)) {
17477+ if (static_cpu_has(X86_FEATURE_INVPCID)) {
17478+ u64 descriptor[2];
17479+ descriptor[0] = PCID_USER;
17480+ asm volatile(__ASM_INVPCID : : "d"(&descriptor), "a"(INVPCID_SINGLE_CONTEXT) : "memory");
17481+ if (!static_cpu_has(X86_FEATURE_STRONGUDEREF)) {
17482+ descriptor[0] = PCID_KERNEL;
17483+ asm volatile(__ASM_INVPCID : : "d"(&descriptor), "a"(INVPCID_SINGLE_CONTEXT) : "memory");
17484+ }
17485+ } else {
17486+ write_cr3(__pa(get_cpu_pgd(cpu, user)) | PCID_USER);
17487+ if (static_cpu_has(X86_FEATURE_STRONGUDEREF))
17488+ write_cr3(__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL | PCID_NOFLUSH);
17489+ else
17490+ write_cr3(__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL);
17491+ }
17492+ } else
17493+#endif
17494+
17495+ load_cr3(get_cpu_pgd(cpu, kernel));
17496+#endif
17497+
17498 #ifdef CONFIG_SMP
17499- else {
17500 this_cpu_write(cpu_tlbstate.state, TLBSTATE_OK);
17501 BUG_ON(this_cpu_read(cpu_tlbstate.active_mm) != next);
17502
17503@@ -103,13 +218,30 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
17504 * tlb flush IPI delivery. We must reload CR3
17505 * to make sure to use no freed page tables.
17506 */
17507+
17508+#ifndef CONFIG_PAX_PER_CPU_PGD
17509 load_cr3(next->pgd);
17510 trace_tlb_flush(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL);
17511+#endif
17512+
17513 load_mm_cr4(next);
17514 load_LDT_nolock(&next->context);
17515+
17516+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
17517+ if (!(__supported_pte_mask & _PAGE_NX))
17518+ cpu_set(cpu, next->context.cpu_user_cs_mask);
17519+#endif
17520+
17521+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
17522+#ifdef CONFIG_PAX_PAGEEXEC
17523+ if (!((next->pax_flags & MF_PAX_PAGEEXEC) && (__supported_pte_mask & _PAGE_NX)))
17524+#endif
17525+ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
17526+#endif
17527+
17528 }
17529+#endif
17530 }
17531-#endif
17532 }
17533
17534 #define activate_mm(prev, next) \
17535diff --git a/arch/x86/include/asm/module.h b/arch/x86/include/asm/module.h
17536index e3b7819..b257c64 100644
17537--- a/arch/x86/include/asm/module.h
17538+++ b/arch/x86/include/asm/module.h
17539@@ -5,6 +5,7 @@
17540
17541 #ifdef CONFIG_X86_64
17542 /* X86_64 does not define MODULE_PROC_FAMILY */
17543+#define MODULE_PROC_FAMILY ""
17544 #elif defined CONFIG_M486
17545 #define MODULE_PROC_FAMILY "486 "
17546 #elif defined CONFIG_M586
17547@@ -57,8 +58,20 @@
17548 #error unknown processor family
17549 #endif
17550
17551-#ifdef CONFIG_X86_32
17552-# define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY
17553+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_BTS
17554+#define MODULE_PAX_KERNEXEC "KERNEXEC_BTS "
17555+#elif defined(CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR)
17556+#define MODULE_PAX_KERNEXEC "KERNEXEC_OR "
17557+#else
17558+#define MODULE_PAX_KERNEXEC ""
17559 #endif
17560
17561+#ifdef CONFIG_PAX_MEMORY_UDEREF
17562+#define MODULE_PAX_UDEREF "UDEREF "
17563+#else
17564+#define MODULE_PAX_UDEREF ""
17565+#endif
17566+
17567+#define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY MODULE_PAX_KERNEXEC MODULE_PAX_UDEREF
17568+
17569 #endif /* _ASM_X86_MODULE_H */
17570diff --git a/arch/x86/include/asm/nmi.h b/arch/x86/include/asm/nmi.h
17571index 5f2fc44..106caa6 100644
17572--- a/arch/x86/include/asm/nmi.h
17573+++ b/arch/x86/include/asm/nmi.h
17574@@ -36,26 +36,35 @@ enum {
17575
17576 typedef int (*nmi_handler_t)(unsigned int, struct pt_regs *);
17577
17578+struct nmiaction;
17579+
17580+struct nmiwork {
17581+ const struct nmiaction *action;
17582+ u64 max_duration;
17583+ struct irq_work irq_work;
17584+};
17585+
17586 struct nmiaction {
17587 struct list_head list;
17588 nmi_handler_t handler;
17589- u64 max_duration;
17590- struct irq_work irq_work;
17591 unsigned long flags;
17592 const char *name;
17593-};
17594+ struct nmiwork *work;
17595+} __do_const;
17596
17597 #define register_nmi_handler(t, fn, fg, n, init...) \
17598 ({ \
17599- static struct nmiaction init fn##_na = { \
17600+ static struct nmiwork fn##_nw; \
17601+ static const struct nmiaction init fn##_na = { \
17602 .handler = (fn), \
17603 .name = (n), \
17604 .flags = (fg), \
17605+ .work = &fn##_nw, \
17606 }; \
17607 __register_nmi_handler((t), &fn##_na); \
17608 })
17609
17610-int __register_nmi_handler(unsigned int, struct nmiaction *);
17611+int __register_nmi_handler(unsigned int, const struct nmiaction *);
17612
17613 void unregister_nmi_handler(unsigned int, const char *);
17614
17615diff --git a/arch/x86/include/asm/page.h b/arch/x86/include/asm/page.h
17616index 802dde3..9183e68 100644
17617--- a/arch/x86/include/asm/page.h
17618+++ b/arch/x86/include/asm/page.h
17619@@ -52,6 +52,7 @@ static inline void copy_user_page(void *to, void *from, unsigned long vaddr,
17620 __phys_addr_symbol(__phys_reloc_hide((unsigned long)(x)))
17621
17622 #define __va(x) ((void *)((unsigned long)(x)+PAGE_OFFSET))
17623+#define __early_va(x) ((void *)((unsigned long)(x)+__START_KERNEL_map - phys_base))
17624
17625 #define __boot_va(x) __va(x)
17626 #define __boot_pa(x) __pa(x)
17627@@ -60,11 +61,21 @@ static inline void copy_user_page(void *to, void *from, unsigned long vaddr,
17628 * virt_to_page(kaddr) returns a valid pointer if and only if
17629 * virt_addr_valid(kaddr) returns true.
17630 */
17631-#define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
17632 #define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT)
17633 extern bool __virt_addr_valid(unsigned long kaddr);
17634 #define virt_addr_valid(kaddr) __virt_addr_valid((unsigned long) (kaddr))
17635
17636+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
17637+#define virt_to_page(kaddr) \
17638+ ({ \
17639+ const void *__kaddr = (const void *)(kaddr); \
17640+ BUG_ON(!virt_addr_valid(__kaddr)); \
17641+ pfn_to_page(__pa(__kaddr) >> PAGE_SHIFT); \
17642+ })
17643+#else
17644+#define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
17645+#endif
17646+
17647 #endif /* __ASSEMBLY__ */
17648
17649 #include <asm-generic/memory_model.h>
17650diff --git a/arch/x86/include/asm/page_64.h b/arch/x86/include/asm/page_64.h
17651index b3bebf9..13ac22e 100644
17652--- a/arch/x86/include/asm/page_64.h
17653+++ b/arch/x86/include/asm/page_64.h
17654@@ -7,9 +7,9 @@
17655
17656 /* duplicated to the one in bootmem.h */
17657 extern unsigned long max_pfn;
17658-extern unsigned long phys_base;
17659+extern const unsigned long phys_base;
17660
17661-static inline unsigned long __phys_addr_nodebug(unsigned long x)
17662+static inline unsigned long __intentional_overflow(-1) __phys_addr_nodebug(unsigned long x)
17663 {
17664 unsigned long y = x - __START_KERNEL_map;
17665
17666@@ -20,8 +20,8 @@ static inline unsigned long __phys_addr_nodebug(unsigned long x)
17667 }
17668
17669 #ifdef CONFIG_DEBUG_VIRTUAL
17670-extern unsigned long __phys_addr(unsigned long);
17671-extern unsigned long __phys_addr_symbol(unsigned long);
17672+extern unsigned long __intentional_overflow(-1) __phys_addr(unsigned long);
17673+extern unsigned long __intentional_overflow(-1) __phys_addr_symbol(unsigned long);
17674 #else
17675 #define __phys_addr(x) __phys_addr_nodebug(x)
17676 #define __phys_addr_symbol(x) \
17677diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
17678index 965c47d..ffe0af8 100644
17679--- a/arch/x86/include/asm/paravirt.h
17680+++ b/arch/x86/include/asm/paravirt.h
17681@@ -560,7 +560,7 @@ static inline pmd_t __pmd(pmdval_t val)
17682 return (pmd_t) { ret };
17683 }
17684
17685-static inline pmdval_t pmd_val(pmd_t pmd)
17686+static inline __intentional_overflow(-1) pmdval_t pmd_val(pmd_t pmd)
17687 {
17688 pmdval_t ret;
17689
17690@@ -626,6 +626,18 @@ static inline void set_pgd(pgd_t *pgdp, pgd_t pgd)
17691 val);
17692 }
17693
17694+static inline void set_pgd_batched(pgd_t *pgdp, pgd_t pgd)
17695+{
17696+ pgdval_t val = native_pgd_val(pgd);
17697+
17698+ if (sizeof(pgdval_t) > sizeof(long))
17699+ PVOP_VCALL3(pv_mmu_ops.set_pgd_batched, pgdp,
17700+ val, (u64)val >> 32);
17701+ else
17702+ PVOP_VCALL2(pv_mmu_ops.set_pgd_batched, pgdp,
17703+ val);
17704+}
17705+
17706 static inline void pgd_clear(pgd_t *pgdp)
17707 {
17708 set_pgd(pgdp, __pgd(0));
17709@@ -710,6 +722,21 @@ static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx,
17710 pv_mmu_ops.set_fixmap(idx, phys, flags);
17711 }
17712
17713+#ifdef CONFIG_PAX_KERNEXEC
17714+static inline unsigned long pax_open_kernel(void)
17715+{
17716+ return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_open_kernel);
17717+}
17718+
17719+static inline unsigned long pax_close_kernel(void)
17720+{
17721+ return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_close_kernel);
17722+}
17723+#else
17724+static inline unsigned long pax_open_kernel(void) { return 0; }
17725+static inline unsigned long pax_close_kernel(void) { return 0; }
17726+#endif
17727+
17728 #if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS)
17729
17730 static __always_inline void __ticket_lock_spinning(struct arch_spinlock *lock,
17731@@ -906,7 +933,7 @@ extern void default_banner(void);
17732
17733 #define PARA_PATCH(struct, off) ((PARAVIRT_PATCH_##struct + (off)) / 4)
17734 #define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .long, 4)
17735-#define PARA_INDIRECT(addr) *%cs:addr
17736+#define PARA_INDIRECT(addr) *%ss:addr
17737 #endif
17738
17739 #define INTERRUPT_RETURN \
17740@@ -981,6 +1008,21 @@ extern void default_banner(void);
17741 PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_irq_enable_sysexit), \
17742 CLBR_NONE, \
17743 jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_irq_enable_sysexit))
17744+
17745+#define GET_CR0_INTO_RDI \
17746+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0); \
17747+ mov %rax,%rdi
17748+
17749+#define SET_RDI_INTO_CR0 \
17750+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
17751+
17752+#define GET_CR3_INTO_RDI \
17753+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_read_cr3); \
17754+ mov %rax,%rdi
17755+
17756+#define SET_RDI_INTO_CR3 \
17757+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_write_cr3)
17758+
17759 #endif /* CONFIG_X86_32 */
17760
17761 #endif /* __ASSEMBLY__ */
17762diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h
17763index 7549b8b..f0edfda 100644
17764--- a/arch/x86/include/asm/paravirt_types.h
17765+++ b/arch/x86/include/asm/paravirt_types.h
17766@@ -84,7 +84,7 @@ struct pv_init_ops {
17767 */
17768 unsigned (*patch)(u8 type, u16 clobber, void *insnbuf,
17769 unsigned long addr, unsigned len);
17770-};
17771+} __no_const __no_randomize_layout;
17772
17773
17774 struct pv_lazy_ops {
17775@@ -92,13 +92,13 @@ struct pv_lazy_ops {
17776 void (*enter)(void);
17777 void (*leave)(void);
17778 void (*flush)(void);
17779-};
17780+} __no_randomize_layout;
17781
17782 struct pv_time_ops {
17783 unsigned long long (*sched_clock)(void);
17784 unsigned long long (*steal_clock)(int cpu);
17785 unsigned long (*get_tsc_khz)(void);
17786-};
17787+} __no_const __no_randomize_layout;
17788
17789 struct pv_cpu_ops {
17790 /* hooks for various privileged instructions */
17791@@ -192,7 +192,7 @@ struct pv_cpu_ops {
17792
17793 void (*start_context_switch)(struct task_struct *prev);
17794 void (*end_context_switch)(struct task_struct *next);
17795-};
17796+} __no_const __no_randomize_layout;
17797
17798 struct pv_irq_ops {
17799 /*
17800@@ -215,7 +215,7 @@ struct pv_irq_ops {
17801 #ifdef CONFIG_X86_64
17802 void (*adjust_exception_frame)(void);
17803 #endif
17804-};
17805+} __no_randomize_layout;
17806
17807 struct pv_apic_ops {
17808 #ifdef CONFIG_X86_LOCAL_APIC
17809@@ -223,7 +223,7 @@ struct pv_apic_ops {
17810 unsigned long start_eip,
17811 unsigned long start_esp);
17812 #endif
17813-};
17814+} __no_const __no_randomize_layout;
17815
17816 struct pv_mmu_ops {
17817 unsigned long (*read_cr2)(void);
17818@@ -313,6 +313,7 @@ struct pv_mmu_ops {
17819 struct paravirt_callee_save make_pud;
17820
17821 void (*set_pgd)(pgd_t *pudp, pgd_t pgdval);
17822+ void (*set_pgd_batched)(pgd_t *pudp, pgd_t pgdval);
17823 #endif /* PAGETABLE_LEVELS == 4 */
17824 #endif /* PAGETABLE_LEVELS >= 3 */
17825
17826@@ -324,7 +325,13 @@ struct pv_mmu_ops {
17827 an mfn. We can tell which is which from the index. */
17828 void (*set_fixmap)(unsigned /* enum fixed_addresses */ idx,
17829 phys_addr_t phys, pgprot_t flags);
17830-};
17831+
17832+#ifdef CONFIG_PAX_KERNEXEC
17833+ unsigned long (*pax_open_kernel)(void);
17834+ unsigned long (*pax_close_kernel)(void);
17835+#endif
17836+
17837+} __no_randomize_layout;
17838
17839 struct arch_spinlock;
17840 #ifdef CONFIG_SMP
17841@@ -336,11 +343,14 @@ typedef u16 __ticket_t;
17842 struct pv_lock_ops {
17843 struct paravirt_callee_save lock_spinning;
17844 void (*unlock_kick)(struct arch_spinlock *lock, __ticket_t ticket);
17845-};
17846+} __no_randomize_layout;
17847
17848 /* This contains all the paravirt structures: we get a convenient
17849 * number for each function using the offset which we use to indicate
17850- * what to patch. */
17851+ * what to patch.
17852+ * shouldn't be randomized due to the "NEAT TRICK" in paravirt.c
17853+ */
17854+
17855 struct paravirt_patch_template {
17856 struct pv_init_ops pv_init_ops;
17857 struct pv_time_ops pv_time_ops;
17858@@ -349,7 +359,7 @@ struct paravirt_patch_template {
17859 struct pv_apic_ops pv_apic_ops;
17860 struct pv_mmu_ops pv_mmu_ops;
17861 struct pv_lock_ops pv_lock_ops;
17862-};
17863+} __no_randomize_layout;
17864
17865 extern struct pv_info pv_info;
17866 extern struct pv_init_ops pv_init_ops;
17867diff --git a/arch/x86/include/asm/pgalloc.h b/arch/x86/include/asm/pgalloc.h
17868index c4412e9..90e88c5 100644
17869--- a/arch/x86/include/asm/pgalloc.h
17870+++ b/arch/x86/include/asm/pgalloc.h
17871@@ -63,6 +63,13 @@ static inline void pmd_populate_kernel(struct mm_struct *mm,
17872 pmd_t *pmd, pte_t *pte)
17873 {
17874 paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
17875+ set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE));
17876+}
17877+
17878+static inline void pmd_populate_user(struct mm_struct *mm,
17879+ pmd_t *pmd, pte_t *pte)
17880+{
17881+ paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
17882 set_pmd(pmd, __pmd(__pa(pte) | _PAGE_TABLE));
17883 }
17884
17885@@ -108,12 +115,22 @@ static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd,
17886
17887 #ifdef CONFIG_X86_PAE
17888 extern void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd);
17889+static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
17890+{
17891+ pud_populate(mm, pudp, pmd);
17892+}
17893 #else /* !CONFIG_X86_PAE */
17894 static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
17895 {
17896 paravirt_alloc_pmd(mm, __pa(pmd) >> PAGE_SHIFT);
17897 set_pud(pud, __pud(_PAGE_TABLE | __pa(pmd)));
17898 }
17899+
17900+static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
17901+{
17902+ paravirt_alloc_pmd(mm, __pa(pmd) >> PAGE_SHIFT);
17903+ set_pud(pud, __pud(_KERNPG_TABLE | __pa(pmd)));
17904+}
17905 #endif /* CONFIG_X86_PAE */
17906
17907 #if PAGETABLE_LEVELS > 3
17908@@ -123,6 +140,12 @@ static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
17909 set_pgd(pgd, __pgd(_PAGE_TABLE | __pa(pud)));
17910 }
17911
17912+static inline void pgd_populate_kernel(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
17913+{
17914+ paravirt_alloc_pud(mm, __pa(pud) >> PAGE_SHIFT);
17915+ set_pgd(pgd, __pgd(_KERNPG_TABLE | __pa(pud)));
17916+}
17917+
17918 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
17919 {
17920 return (pud_t *)get_zeroed_page(GFP_KERNEL|__GFP_REPEAT);
17921diff --git a/arch/x86/include/asm/pgtable-2level.h b/arch/x86/include/asm/pgtable-2level.h
17922index fd74a11..35fd5af 100644
17923--- a/arch/x86/include/asm/pgtable-2level.h
17924+++ b/arch/x86/include/asm/pgtable-2level.h
17925@@ -18,7 +18,9 @@ static inline void native_set_pte(pte_t *ptep , pte_t pte)
17926
17927 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
17928 {
17929+ pax_open_kernel();
17930 *pmdp = pmd;
17931+ pax_close_kernel();
17932 }
17933
17934 static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
17935diff --git a/arch/x86/include/asm/pgtable-3level.h b/arch/x86/include/asm/pgtable-3level.h
17936index cdaa58c..e61122b 100644
17937--- a/arch/x86/include/asm/pgtable-3level.h
17938+++ b/arch/x86/include/asm/pgtable-3level.h
17939@@ -92,12 +92,16 @@ static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
17940
17941 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
17942 {
17943+ pax_open_kernel();
17944 set_64bit((unsigned long long *)(pmdp), native_pmd_val(pmd));
17945+ pax_close_kernel();
17946 }
17947
17948 static inline void native_set_pud(pud_t *pudp, pud_t pud)
17949 {
17950+ pax_open_kernel();
17951 set_64bit((unsigned long long *)(pudp), native_pud_val(pud));
17952+ pax_close_kernel();
17953 }
17954
17955 /*
17956diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
17957index a0c35bf..7045c6a 100644
17958--- a/arch/x86/include/asm/pgtable.h
17959+++ b/arch/x86/include/asm/pgtable.h
17960@@ -47,6 +47,7 @@ extern struct mm_struct *pgd_page_get_mm(struct page *page);
17961
17962 #ifndef __PAGETABLE_PUD_FOLDED
17963 #define set_pgd(pgdp, pgd) native_set_pgd(pgdp, pgd)
17964+#define set_pgd_batched(pgdp, pgd) native_set_pgd_batched(pgdp, pgd)
17965 #define pgd_clear(pgd) native_pgd_clear(pgd)
17966 #endif
17967
17968@@ -84,12 +85,53 @@ extern struct mm_struct *pgd_page_get_mm(struct page *page);
17969
17970 #define arch_end_context_switch(prev) do {} while(0)
17971
17972+#define pax_open_kernel() native_pax_open_kernel()
17973+#define pax_close_kernel() native_pax_close_kernel()
17974 #endif /* CONFIG_PARAVIRT */
17975
17976+#define __HAVE_ARCH_PAX_OPEN_KERNEL
17977+#define __HAVE_ARCH_PAX_CLOSE_KERNEL
17978+
17979+#ifdef CONFIG_PAX_KERNEXEC
17980+static inline unsigned long native_pax_open_kernel(void)
17981+{
17982+ unsigned long cr0;
17983+
17984+ preempt_disable();
17985+ barrier();
17986+ cr0 = read_cr0() ^ X86_CR0_WP;
17987+ BUG_ON(cr0 & X86_CR0_WP);
17988+ write_cr0(cr0);
17989+ barrier();
17990+ return cr0 ^ X86_CR0_WP;
17991+}
17992+
17993+static inline unsigned long native_pax_close_kernel(void)
17994+{
17995+ unsigned long cr0;
17996+
17997+ barrier();
17998+ cr0 = read_cr0() ^ X86_CR0_WP;
17999+ BUG_ON(!(cr0 & X86_CR0_WP));
18000+ write_cr0(cr0);
18001+ barrier();
18002+ preempt_enable_no_resched();
18003+ return cr0 ^ X86_CR0_WP;
18004+}
18005+#else
18006+static inline unsigned long native_pax_open_kernel(void) { return 0; }
18007+static inline unsigned long native_pax_close_kernel(void) { return 0; }
18008+#endif
18009+
18010 /*
18011 * The following only work if pte_present() is true.
18012 * Undefined behaviour if not..
18013 */
18014+static inline int pte_user(pte_t pte)
18015+{
18016+ return pte_val(pte) & _PAGE_USER;
18017+}
18018+
18019 static inline int pte_dirty(pte_t pte)
18020 {
18021 return pte_flags(pte) & _PAGE_DIRTY;
18022@@ -150,6 +192,11 @@ static inline unsigned long pud_pfn(pud_t pud)
18023 return (pud_val(pud) & PTE_PFN_MASK) >> PAGE_SHIFT;
18024 }
18025
18026+static inline unsigned long pgd_pfn(pgd_t pgd)
18027+{
18028+ return (pgd_val(pgd) & PTE_PFN_MASK) >> PAGE_SHIFT;
18029+}
18030+
18031 #define pte_page(pte) pfn_to_page(pte_pfn(pte))
18032
18033 static inline int pmd_large(pmd_t pte)
18034@@ -203,9 +250,29 @@ static inline pte_t pte_wrprotect(pte_t pte)
18035 return pte_clear_flags(pte, _PAGE_RW);
18036 }
18037
18038+static inline pte_t pte_mkread(pte_t pte)
18039+{
18040+ return __pte(pte_val(pte) | _PAGE_USER);
18041+}
18042+
18043 static inline pte_t pte_mkexec(pte_t pte)
18044 {
18045- return pte_clear_flags(pte, _PAGE_NX);
18046+#ifdef CONFIG_X86_PAE
18047+ if (__supported_pte_mask & _PAGE_NX)
18048+ return pte_clear_flags(pte, _PAGE_NX);
18049+ else
18050+#endif
18051+ return pte_set_flags(pte, _PAGE_USER);
18052+}
18053+
18054+static inline pte_t pte_exprotect(pte_t pte)
18055+{
18056+#ifdef CONFIG_X86_PAE
18057+ if (__supported_pte_mask & _PAGE_NX)
18058+ return pte_set_flags(pte, _PAGE_NX);
18059+ else
18060+#endif
18061+ return pte_clear_flags(pte, _PAGE_USER);
18062 }
18063
18064 static inline pte_t pte_mkdirty(pte_t pte)
18065@@ -420,6 +487,16 @@ pte_t *populate_extra_pte(unsigned long vaddr);
18066 #endif
18067
18068 #ifndef __ASSEMBLY__
18069+
18070+#ifdef CONFIG_PAX_PER_CPU_PGD
18071+extern pgd_t cpu_pgd[NR_CPUS][2][PTRS_PER_PGD];
18072+enum cpu_pgd_type {kernel = 0, user = 1};
18073+static inline pgd_t *get_cpu_pgd(unsigned int cpu, enum cpu_pgd_type type)
18074+{
18075+ return cpu_pgd[cpu][type];
18076+}
18077+#endif
18078+
18079 #include <linux/mm_types.h>
18080 #include <linux/mmdebug.h>
18081 #include <linux/log2.h>
18082@@ -571,7 +648,7 @@ static inline unsigned long pud_page_vaddr(pud_t pud)
18083 * Currently stuck as a macro due to indirect forward reference to
18084 * linux/mmzone.h's __section_mem_map_addr() definition:
18085 */
18086-#define pud_page(pud) pfn_to_page(pud_val(pud) >> PAGE_SHIFT)
18087+#define pud_page(pud) pfn_to_page((pud_val(pud) & PTE_PFN_MASK) >> PAGE_SHIFT)
18088
18089 /* Find an entry in the second-level page table.. */
18090 static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
18091@@ -611,7 +688,7 @@ static inline unsigned long pgd_page_vaddr(pgd_t pgd)
18092 * Currently stuck as a macro due to indirect forward reference to
18093 * linux/mmzone.h's __section_mem_map_addr() definition:
18094 */
18095-#define pgd_page(pgd) pfn_to_page(pgd_val(pgd) >> PAGE_SHIFT)
18096+#define pgd_page(pgd) pfn_to_page((pgd_val(pgd) & PTE_PFN_MASK) >> PAGE_SHIFT)
18097
18098 /* to find an entry in a page-table-directory. */
18099 static inline unsigned long pud_index(unsigned long address)
18100@@ -626,7 +703,7 @@ static inline pud_t *pud_offset(pgd_t *pgd, unsigned long address)
18101
18102 static inline int pgd_bad(pgd_t pgd)
18103 {
18104- return (pgd_flags(pgd) & ~_PAGE_USER) != _KERNPG_TABLE;
18105+ return (pgd_flags(pgd) & ~(_PAGE_USER | _PAGE_NX)) != _KERNPG_TABLE;
18106 }
18107
18108 static inline int pgd_none(pgd_t pgd)
18109@@ -649,7 +726,12 @@ static inline int pgd_none(pgd_t pgd)
18110 * pgd_offset() returns a (pgd_t *)
18111 * pgd_index() is used get the offset into the pgd page's array of pgd_t's;
18112 */
18113-#define pgd_offset(mm, address) ((mm)->pgd + pgd_index((address)))
18114+#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
18115+
18116+#ifdef CONFIG_PAX_PER_CPU_PGD
18117+#define pgd_offset_cpu(cpu, type, address) (get_cpu_pgd(cpu, type) + pgd_index(address))
18118+#endif
18119+
18120 /*
18121 * a shortcut which implies the use of the kernel's pgd, instead
18122 * of a process's
18123@@ -660,6 +742,25 @@ static inline int pgd_none(pgd_t pgd)
18124 #define KERNEL_PGD_BOUNDARY pgd_index(PAGE_OFFSET)
18125 #define KERNEL_PGD_PTRS (PTRS_PER_PGD - KERNEL_PGD_BOUNDARY)
18126
18127+#ifdef CONFIG_X86_32
18128+#define USER_PGD_PTRS KERNEL_PGD_BOUNDARY
18129+#else
18130+#define TASK_SIZE_MAX_SHIFT CONFIG_TASK_SIZE_MAX_SHIFT
18131+#define USER_PGD_PTRS (_AC(1,UL) << (TASK_SIZE_MAX_SHIFT - PGDIR_SHIFT))
18132+
18133+#ifdef CONFIG_PAX_MEMORY_UDEREF
18134+#ifdef __ASSEMBLY__
18135+#define pax_user_shadow_base pax_user_shadow_base(%rip)
18136+#else
18137+extern unsigned long pax_user_shadow_base;
18138+extern pgdval_t clone_pgd_mask;
18139+#endif
18140+#else
18141+#define pax_user_shadow_base (0UL)
18142+#endif
18143+
18144+#endif
18145+
18146 #ifndef __ASSEMBLY__
18147
18148 extern int direct_gbpages;
18149@@ -826,11 +927,24 @@ static inline void pmdp_set_wrprotect(struct mm_struct *mm,
18150 * dst and src can be on the same page, but the range must not overlap,
18151 * and must not cross a page boundary.
18152 */
18153-static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
18154+static inline void clone_pgd_range(pgd_t *dst, const pgd_t *src, int count)
18155 {
18156- memcpy(dst, src, count * sizeof(pgd_t));
18157+ pax_open_kernel();
18158+ while (count--)
18159+ *dst++ = *src++;
18160+ pax_close_kernel();
18161 }
18162
18163+#ifdef CONFIG_PAX_PER_CPU_PGD
18164+extern void __clone_user_pgds(pgd_t *dst, const pgd_t *src);
18165+#endif
18166+
18167+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
18168+extern void __shadow_user_pgds(pgd_t *dst, const pgd_t *src);
18169+#else
18170+static inline void __shadow_user_pgds(pgd_t *dst, const pgd_t *src) {}
18171+#endif
18172+
18173 #define PTE_SHIFT ilog2(PTRS_PER_PTE)
18174 static inline int page_level_shift(enum pg_level level)
18175 {
18176diff --git a/arch/x86/include/asm/pgtable_32.h b/arch/x86/include/asm/pgtable_32.h
18177index b6c0b40..3535d47 100644
18178--- a/arch/x86/include/asm/pgtable_32.h
18179+++ b/arch/x86/include/asm/pgtable_32.h
18180@@ -25,9 +25,6 @@
18181 struct mm_struct;
18182 struct vm_area_struct;
18183
18184-extern pgd_t swapper_pg_dir[1024];
18185-extern pgd_t initial_page_table[1024];
18186-
18187 static inline void pgtable_cache_init(void) { }
18188 static inline void check_pgt_cache(void) { }
18189 void paging_init(void);
18190@@ -45,6 +42,12 @@ void paging_init(void);
18191 # include <asm/pgtable-2level.h>
18192 #endif
18193
18194+extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
18195+extern pgd_t initial_page_table[PTRS_PER_PGD];
18196+#ifdef CONFIG_X86_PAE
18197+extern pmd_t swapper_pm_dir[PTRS_PER_PGD][PTRS_PER_PMD];
18198+#endif
18199+
18200 #if defined(CONFIG_HIGHPTE)
18201 #define pte_offset_map(dir, address) \
18202 ((pte_t *)kmap_atomic(pmd_page(*(dir))) + \
18203@@ -59,12 +62,17 @@ void paging_init(void);
18204 /* Clear a kernel PTE and flush it from the TLB */
18205 #define kpte_clear_flush(ptep, vaddr) \
18206 do { \
18207+ pax_open_kernel(); \
18208 pte_clear(&init_mm, (vaddr), (ptep)); \
18209+ pax_close_kernel(); \
18210 __flush_tlb_one((vaddr)); \
18211 } while (0)
18212
18213 #endif /* !__ASSEMBLY__ */
18214
18215+#define HAVE_ARCH_UNMAPPED_AREA
18216+#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
18217+
18218 /*
18219 * kern_addr_valid() is (1) for FLATMEM and (0) for
18220 * SPARSEMEM and DISCONTIGMEM
18221diff --git a/arch/x86/include/asm/pgtable_32_types.h b/arch/x86/include/asm/pgtable_32_types.h
18222index 9fb2f2b..b04b4bf 100644
18223--- a/arch/x86/include/asm/pgtable_32_types.h
18224+++ b/arch/x86/include/asm/pgtable_32_types.h
18225@@ -8,7 +8,7 @@
18226 */
18227 #ifdef CONFIG_X86_PAE
18228 # include <asm/pgtable-3level_types.h>
18229-# define PMD_SIZE (1UL << PMD_SHIFT)
18230+# define PMD_SIZE (_AC(1, UL) << PMD_SHIFT)
18231 # define PMD_MASK (~(PMD_SIZE - 1))
18232 #else
18233 # include <asm/pgtable-2level_types.h>
18234@@ -46,6 +46,19 @@ extern bool __vmalloc_start_set; /* set once high_memory is set */
18235 # define VMALLOC_END (FIXADDR_START - 2 * PAGE_SIZE)
18236 #endif
18237
18238+#ifdef CONFIG_PAX_KERNEXEC
18239+#ifndef __ASSEMBLY__
18240+extern unsigned char MODULES_EXEC_VADDR[];
18241+extern unsigned char MODULES_EXEC_END[];
18242+#endif
18243+#include <asm/boot.h>
18244+#define ktla_ktva(addr) (addr + LOAD_PHYSICAL_ADDR + PAGE_OFFSET)
18245+#define ktva_ktla(addr) (addr - LOAD_PHYSICAL_ADDR - PAGE_OFFSET)
18246+#else
18247+#define ktla_ktva(addr) (addr)
18248+#define ktva_ktla(addr) (addr)
18249+#endif
18250+
18251 #define MODULES_VADDR VMALLOC_START
18252 #define MODULES_END VMALLOC_END
18253 #define MODULES_LEN (MODULES_VADDR - MODULES_END)
18254diff --git a/arch/x86/include/asm/pgtable_64.h b/arch/x86/include/asm/pgtable_64.h
18255index 2ee7811..55aca24 100644
18256--- a/arch/x86/include/asm/pgtable_64.h
18257+++ b/arch/x86/include/asm/pgtable_64.h
18258@@ -16,11 +16,16 @@
18259
18260 extern pud_t level3_kernel_pgt[512];
18261 extern pud_t level3_ident_pgt[512];
18262+extern pud_t level3_vmalloc_start_pgt[512];
18263+extern pud_t level3_vmalloc_end_pgt[512];
18264+extern pud_t level3_vmemmap_pgt[512];
18265+extern pud_t level2_vmemmap_pgt[512];
18266 extern pmd_t level2_kernel_pgt[512];
18267 extern pmd_t level2_fixmap_pgt[512];
18268-extern pmd_t level2_ident_pgt[512];
18269-extern pte_t level1_fixmap_pgt[512];
18270-extern pgd_t init_level4_pgt[];
18271+extern pmd_t level2_ident_pgt[2][512];
18272+extern pte_t level1_fixmap_pgt[3][512];
18273+extern pte_t level1_vsyscall_pgt[512];
18274+extern pgd_t init_level4_pgt[512];
18275
18276 #define swapper_pg_dir init_level4_pgt
18277
18278@@ -62,7 +67,9 @@ static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
18279
18280 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
18281 {
18282+ pax_open_kernel();
18283 *pmdp = pmd;
18284+ pax_close_kernel();
18285 }
18286
18287 static inline void native_pmd_clear(pmd_t *pmd)
18288@@ -98,7 +105,9 @@ static inline pmd_t native_pmdp_get_and_clear(pmd_t *xp)
18289
18290 static inline void native_set_pud(pud_t *pudp, pud_t pud)
18291 {
18292+ pax_open_kernel();
18293 *pudp = pud;
18294+ pax_close_kernel();
18295 }
18296
18297 static inline void native_pud_clear(pud_t *pud)
18298@@ -108,6 +117,13 @@ static inline void native_pud_clear(pud_t *pud)
18299
18300 static inline void native_set_pgd(pgd_t *pgdp, pgd_t pgd)
18301 {
18302+ pax_open_kernel();
18303+ *pgdp = pgd;
18304+ pax_close_kernel();
18305+}
18306+
18307+static inline void native_set_pgd_batched(pgd_t *pgdp, pgd_t pgd)
18308+{
18309 *pgdp = pgd;
18310 }
18311
18312diff --git a/arch/x86/include/asm/pgtable_64_types.h b/arch/x86/include/asm/pgtable_64_types.h
18313index 602b602..acb53ed 100644
18314--- a/arch/x86/include/asm/pgtable_64_types.h
18315+++ b/arch/x86/include/asm/pgtable_64_types.h
18316@@ -61,11 +61,16 @@ typedef struct { pteval_t pte; } pte_t;
18317 #define MODULES_VADDR (__START_KERNEL_map + KERNEL_IMAGE_SIZE)
18318 #define MODULES_END _AC(0xffffffffff000000, UL)
18319 #define MODULES_LEN (MODULES_END - MODULES_VADDR)
18320+#define MODULES_EXEC_VADDR MODULES_VADDR
18321+#define MODULES_EXEC_END MODULES_END
18322 #define ESPFIX_PGD_ENTRY _AC(-2, UL)
18323 #define ESPFIX_BASE_ADDR (ESPFIX_PGD_ENTRY << PGDIR_SHIFT)
18324 #define EFI_VA_START ( -4 * (_AC(1, UL) << 30))
18325 #define EFI_VA_END (-68 * (_AC(1, UL) << 30))
18326
18327+#define ktla_ktva(addr) (addr)
18328+#define ktva_ktla(addr) (addr)
18329+
18330 #define EARLY_DYNAMIC_PAGE_TABLES 64
18331
18332 #endif /* _ASM_X86_PGTABLE_64_DEFS_H */
18333diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h
18334index 8c7c108..1c1b77f 100644
18335--- a/arch/x86/include/asm/pgtable_types.h
18336+++ b/arch/x86/include/asm/pgtable_types.h
18337@@ -85,8 +85,10 @@
18338
18339 #if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
18340 #define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_NX)
18341-#else
18342+#elif defined(CONFIG_KMEMCHECK) || defined(CONFIG_MEM_SOFT_DIRTY)
18343 #define _PAGE_NX (_AT(pteval_t, 0))
18344+#else
18345+#define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_HIDDEN)
18346 #endif
18347
18348 #define _PAGE_PROTNONE (_AT(pteval_t, 1) << _PAGE_BIT_PROTNONE)
18349@@ -141,6 +143,9 @@ enum page_cache_mode {
18350 #define PAGE_READONLY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | \
18351 _PAGE_ACCESSED)
18352
18353+#define PAGE_READONLY_NOEXEC PAGE_READONLY
18354+#define PAGE_SHARED_NOEXEC PAGE_SHARED
18355+
18356 #define __PAGE_KERNEL_EXEC \
18357 (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_GLOBAL)
18358 #define __PAGE_KERNEL (__PAGE_KERNEL_EXEC | _PAGE_NX)
18359@@ -148,7 +153,7 @@ enum page_cache_mode {
18360 #define __PAGE_KERNEL_RO (__PAGE_KERNEL & ~_PAGE_RW)
18361 #define __PAGE_KERNEL_RX (__PAGE_KERNEL_EXEC & ~_PAGE_RW)
18362 #define __PAGE_KERNEL_NOCACHE (__PAGE_KERNEL | _PAGE_NOCACHE)
18363-#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RX | _PAGE_USER)
18364+#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RO | _PAGE_USER)
18365 #define __PAGE_KERNEL_VVAR (__PAGE_KERNEL_RO | _PAGE_USER)
18366 #define __PAGE_KERNEL_LARGE (__PAGE_KERNEL | _PAGE_PSE)
18367 #define __PAGE_KERNEL_LARGE_EXEC (__PAGE_KERNEL_EXEC | _PAGE_PSE)
18368@@ -194,7 +199,7 @@ enum page_cache_mode {
18369 #ifdef CONFIG_X86_64
18370 #define __PAGE_KERNEL_IDENT_LARGE_EXEC __PAGE_KERNEL_LARGE_EXEC
18371 #else
18372-#define PTE_IDENT_ATTR 0x003 /* PRESENT+RW */
18373+#define PTE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
18374 #define PDE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
18375 #define PGD_IDENT_ATTR 0x001 /* PRESENT (no other attributes) */
18376 #endif
18377@@ -233,7 +238,17 @@ static inline pgdval_t pgd_flags(pgd_t pgd)
18378 {
18379 return native_pgd_val(pgd) & PTE_FLAGS_MASK;
18380 }
18381+#endif
18382
18383+#if PAGETABLE_LEVELS == 3
18384+#include <asm-generic/pgtable-nopud.h>
18385+#endif
18386+
18387+#if PAGETABLE_LEVELS == 2
18388+#include <asm-generic/pgtable-nopmd.h>
18389+#endif
18390+
18391+#ifndef __ASSEMBLY__
18392 #if PAGETABLE_LEVELS > 3
18393 typedef struct { pudval_t pud; } pud_t;
18394
18395@@ -247,8 +262,6 @@ static inline pudval_t native_pud_val(pud_t pud)
18396 return pud.pud;
18397 }
18398 #else
18399-#include <asm-generic/pgtable-nopud.h>
18400-
18401 static inline pudval_t native_pud_val(pud_t pud)
18402 {
18403 return native_pgd_val(pud.pgd);
18404@@ -268,8 +281,6 @@ static inline pmdval_t native_pmd_val(pmd_t pmd)
18405 return pmd.pmd;
18406 }
18407 #else
18408-#include <asm-generic/pgtable-nopmd.h>
18409-
18410 static inline pmdval_t native_pmd_val(pmd_t pmd)
18411 {
18412 return native_pgd_val(pmd.pud.pgd);
18413@@ -362,7 +373,6 @@ typedef struct page *pgtable_t;
18414
18415 extern pteval_t __supported_pte_mask;
18416 extern void set_nx(void);
18417-extern int nx_enabled;
18418
18419 #define pgprot_writecombine pgprot_writecombine
18420 extern pgprot_t pgprot_writecombine(pgprot_t prot);
18421diff --git a/arch/x86/include/asm/preempt.h b/arch/x86/include/asm/preempt.h
18422index 8f327184..368fb29 100644
18423--- a/arch/x86/include/asm/preempt.h
18424+++ b/arch/x86/include/asm/preempt.h
18425@@ -84,7 +84,7 @@ static __always_inline void __preempt_count_sub(int val)
18426 */
18427 static __always_inline bool __preempt_count_dec_and_test(void)
18428 {
18429- GEN_UNARY_RMWcc("decl", __preempt_count, __percpu_arg(0), "e");
18430+ GEN_UNARY_RMWcc("decl", "incl", __preempt_count, __percpu_arg(0), "e");
18431 }
18432
18433 /*
18434diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
18435index ec1c935..5cc6023 100644
18436--- a/arch/x86/include/asm/processor.h
18437+++ b/arch/x86/include/asm/processor.h
18438@@ -127,7 +127,7 @@ struct cpuinfo_x86 {
18439 /* Index into per_cpu list: */
18440 u16 cpu_index;
18441 u32 microcode;
18442-};
18443+} __randomize_layout;
18444
18445 #define X86_VENDOR_INTEL 0
18446 #define X86_VENDOR_CYRIX 1
18447@@ -198,9 +198,21 @@ static inline void native_cpuid(unsigned int *eax, unsigned int *ebx,
18448 : "memory");
18449 }
18450
18451+/* invpcid (%rdx),%rax */
18452+#define __ASM_INVPCID ".byte 0x66,0x0f,0x38,0x82,0x02"
18453+
18454+#define INVPCID_SINGLE_ADDRESS 0UL
18455+#define INVPCID_SINGLE_CONTEXT 1UL
18456+#define INVPCID_ALL_GLOBAL 2UL
18457+#define INVPCID_ALL_NONGLOBAL 3UL
18458+
18459+#define PCID_KERNEL 0UL
18460+#define PCID_USER 1UL
18461+#define PCID_NOFLUSH (1UL << 63)
18462+
18463 static inline void load_cr3(pgd_t *pgdir)
18464 {
18465- write_cr3(__pa(pgdir));
18466+ write_cr3(__pa(pgdir) | PCID_KERNEL);
18467 }
18468
18469 #ifdef CONFIG_X86_32
18470@@ -282,7 +294,7 @@ struct tss_struct {
18471
18472 } ____cacheline_aligned;
18473
18474-DECLARE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss);
18475+extern struct tss_struct init_tss[NR_CPUS];
18476
18477 /*
18478 * Save the original ist values for checking stack pointers during debugging
18479@@ -479,6 +491,7 @@ struct thread_struct {
18480 unsigned short ds;
18481 unsigned short fsindex;
18482 unsigned short gsindex;
18483+ unsigned short ss;
18484 #endif
18485 #ifdef CONFIG_X86_32
18486 unsigned long ip;
18487@@ -805,11 +818,18 @@ static inline void spin_lock_prefetch(const void *x)
18488 */
18489 #define TASK_SIZE PAGE_OFFSET
18490 #define TASK_SIZE_MAX TASK_SIZE
18491+
18492+#ifdef CONFIG_PAX_SEGMEXEC
18493+#define SEGMEXEC_TASK_SIZE (TASK_SIZE / 2)
18494+#define STACK_TOP ((current->mm->pax_flags & MF_PAX_SEGMEXEC)?SEGMEXEC_TASK_SIZE:TASK_SIZE)
18495+#else
18496 #define STACK_TOP TASK_SIZE
18497-#define STACK_TOP_MAX STACK_TOP
18498+#endif
18499+
18500+#define STACK_TOP_MAX TASK_SIZE
18501
18502 #define INIT_THREAD { \
18503- .sp0 = sizeof(init_stack) + (long)&init_stack, \
18504+ .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
18505 .vm86_info = NULL, \
18506 .sysenter_cs = __KERNEL_CS, \
18507 .io_bitmap_ptr = NULL, \
18508@@ -823,7 +843,7 @@ static inline void spin_lock_prefetch(const void *x)
18509 */
18510 #define INIT_TSS { \
18511 .x86_tss = { \
18512- .sp0 = sizeof(init_stack) + (long)&init_stack, \
18513+ .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
18514 .ss0 = __KERNEL_DS, \
18515 .ss1 = __KERNEL_CS, \
18516 .io_bitmap_base = INVALID_IO_BITMAP_OFFSET, \
18517@@ -834,11 +854,7 @@ static inline void spin_lock_prefetch(const void *x)
18518 extern unsigned long thread_saved_pc(struct task_struct *tsk);
18519
18520 #define THREAD_SIZE_LONGS (THREAD_SIZE/sizeof(unsigned long))
18521-#define KSTK_TOP(info) \
18522-({ \
18523- unsigned long *__ptr = (unsigned long *)(info); \
18524- (unsigned long)(&__ptr[THREAD_SIZE_LONGS]); \
18525-})
18526+#define KSTK_TOP(info) ((container_of(info, struct task_struct, tinfo))->thread.sp0)
18527
18528 /*
18529 * The below -8 is to reserve 8 bytes on top of the ring0 stack.
18530@@ -853,7 +869,7 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
18531 #define task_pt_regs(task) \
18532 ({ \
18533 struct pt_regs *__regs__; \
18534- __regs__ = (struct pt_regs *)(KSTK_TOP(task_stack_page(task))-8); \
18535+ __regs__ = (struct pt_regs *)((task)->thread.sp0); \
18536 __regs__ - 1; \
18537 })
18538
18539@@ -869,13 +885,13 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
18540 * particular problem by preventing anything from being mapped
18541 * at the maximum canonical address.
18542 */
18543-#define TASK_SIZE_MAX ((1UL << 47) - PAGE_SIZE)
18544+#define TASK_SIZE_MAX ((1UL << TASK_SIZE_MAX_SHIFT) - PAGE_SIZE)
18545
18546 /* This decides where the kernel will search for a free chunk of vm
18547 * space during mmap's.
18548 */
18549 #define IA32_PAGE_OFFSET ((current->personality & ADDR_LIMIT_3GB) ? \
18550- 0xc0000000 : 0xFFFFe000)
18551+ 0xc0000000 : 0xFFFFf000)
18552
18553 #define TASK_SIZE (test_thread_flag(TIF_ADDR32) ? \
18554 IA32_PAGE_OFFSET : TASK_SIZE_MAX)
18555@@ -886,11 +902,11 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
18556 #define STACK_TOP_MAX TASK_SIZE_MAX
18557
18558 #define INIT_THREAD { \
18559- .sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
18560+ .sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
18561 }
18562
18563 #define INIT_TSS { \
18564- .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
18565+ .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
18566 }
18567
18568 /*
18569@@ -918,6 +934,10 @@ extern void start_thread(struct pt_regs *regs, unsigned long new_ip,
18570 */
18571 #define TASK_UNMAPPED_BASE (PAGE_ALIGN(TASK_SIZE / 3))
18572
18573+#ifdef CONFIG_PAX_SEGMEXEC
18574+#define SEGMEXEC_TASK_UNMAPPED_BASE (PAGE_ALIGN(SEGMEXEC_TASK_SIZE / 3))
18575+#endif
18576+
18577 #define KSTK_EIP(task) (task_pt_regs(task)->ip)
18578
18579 /* Get/set a process' ability to use the timestamp counter instruction */
18580@@ -962,7 +982,7 @@ static inline uint32_t hypervisor_cpuid_base(const char *sig, uint32_t leaves)
18581 return 0;
18582 }
18583
18584-extern unsigned long arch_align_stack(unsigned long sp);
18585+#define arch_align_stack(x) ((x) & ~0xfUL)
18586 extern void free_init_pages(char *what, unsigned long begin, unsigned long end);
18587
18588 void default_idle(void);
18589@@ -972,6 +992,6 @@ bool xen_set_default_idle(void);
18590 #define xen_set_default_idle 0
18591 #endif
18592
18593-void stop_this_cpu(void *dummy);
18594+void stop_this_cpu(void *dummy) __noreturn;
18595 void df_debug(struct pt_regs *regs, long error_code);
18596 #endif /* _ASM_X86_PROCESSOR_H */
18597diff --git a/arch/x86/include/asm/ptrace.h b/arch/x86/include/asm/ptrace.h
18598index 86fc2bb..bd5049a 100644
18599--- a/arch/x86/include/asm/ptrace.h
18600+++ b/arch/x86/include/asm/ptrace.h
18601@@ -89,28 +89,29 @@ static inline unsigned long regs_return_value(struct pt_regs *regs)
18602 }
18603
18604 /*
18605- * user_mode_vm(regs) determines whether a register set came from user mode.
18606+ * user_mode(regs) determines whether a register set came from user mode.
18607 * This is true if V8086 mode was enabled OR if the register set was from
18608 * protected mode with RPL-3 CS value. This tricky test checks that with
18609 * one comparison. Many places in the kernel can bypass this full check
18610- * if they have already ruled out V8086 mode, so user_mode(regs) can be used.
18611+ * if they have already ruled out V8086 mode, so user_mode_novm(regs) can
18612+ * be used.
18613 */
18614-static inline int user_mode(struct pt_regs *regs)
18615+static inline int user_mode_novm(struct pt_regs *regs)
18616 {
18617 #ifdef CONFIG_X86_32
18618 return (regs->cs & SEGMENT_RPL_MASK) == USER_RPL;
18619 #else
18620- return !!(regs->cs & 3);
18621+ return !!(regs->cs & SEGMENT_RPL_MASK);
18622 #endif
18623 }
18624
18625-static inline int user_mode_vm(struct pt_regs *regs)
18626+static inline int user_mode(struct pt_regs *regs)
18627 {
18628 #ifdef CONFIG_X86_32
18629 return ((regs->cs & SEGMENT_RPL_MASK) | (regs->flags & X86_VM_MASK)) >=
18630 USER_RPL;
18631 #else
18632- return user_mode(regs);
18633+ return user_mode_novm(regs);
18634 #endif
18635 }
18636
18637@@ -126,15 +127,16 @@ static inline int v8086_mode(struct pt_regs *regs)
18638 #ifdef CONFIG_X86_64
18639 static inline bool user_64bit_mode(struct pt_regs *regs)
18640 {
18641+ unsigned long cs = regs->cs & 0xffff;
18642 #ifndef CONFIG_PARAVIRT
18643 /*
18644 * On non-paravirt systems, this is the only long mode CPL 3
18645 * selector. We do not allow long mode selectors in the LDT.
18646 */
18647- return regs->cs == __USER_CS;
18648+ return cs == __USER_CS;
18649 #else
18650 /* Headers are too twisted for this to go in paravirt.h. */
18651- return regs->cs == __USER_CS || regs->cs == pv_info.extra_user_64bit_cs;
18652+ return cs == __USER_CS || cs == pv_info.extra_user_64bit_cs;
18653 #endif
18654 }
18655
18656@@ -185,9 +187,11 @@ static inline unsigned long regs_get_register(struct pt_regs *regs,
18657 * Traps from the kernel do not save sp and ss.
18658 * Use the helper function to retrieve sp.
18659 */
18660- if (offset == offsetof(struct pt_regs, sp) &&
18661- regs->cs == __KERNEL_CS)
18662- return kernel_stack_pointer(regs);
18663+ if (offset == offsetof(struct pt_regs, sp)) {
18664+ unsigned long cs = regs->cs & 0xffff;
18665+ if (cs == __KERNEL_CS || cs == __KERNEXEC_KERNEL_CS)
18666+ return kernel_stack_pointer(regs);
18667+ }
18668 #endif
18669 return *(unsigned long *)((unsigned long)regs + offset);
18670 }
18671diff --git a/arch/x86/include/asm/qrwlock.h b/arch/x86/include/asm/qrwlock.h
18672index ae0e241..e80b10b 100644
18673--- a/arch/x86/include/asm/qrwlock.h
18674+++ b/arch/x86/include/asm/qrwlock.h
18675@@ -7,8 +7,8 @@
18676 #define queue_write_unlock queue_write_unlock
18677 static inline void queue_write_unlock(struct qrwlock *lock)
18678 {
18679- barrier();
18680- ACCESS_ONCE(*(u8 *)&lock->cnts) = 0;
18681+ barrier();
18682+ ACCESS_ONCE_RW(*(u8 *)&lock->cnts) = 0;
18683 }
18684 #endif
18685
18686diff --git a/arch/x86/include/asm/realmode.h b/arch/x86/include/asm/realmode.h
18687index 9c6b890..5305f53 100644
18688--- a/arch/x86/include/asm/realmode.h
18689+++ b/arch/x86/include/asm/realmode.h
18690@@ -22,16 +22,14 @@ struct real_mode_header {
18691 #endif
18692 /* APM/BIOS reboot */
18693 u32 machine_real_restart_asm;
18694-#ifdef CONFIG_X86_64
18695 u32 machine_real_restart_seg;
18696-#endif
18697 };
18698
18699 /* This must match data at trampoline_32/64.S */
18700 struct trampoline_header {
18701 #ifdef CONFIG_X86_32
18702 u32 start;
18703- u16 gdt_pad;
18704+ u16 boot_cs;
18705 u16 gdt_limit;
18706 u32 gdt_base;
18707 #else
18708diff --git a/arch/x86/include/asm/reboot.h b/arch/x86/include/asm/reboot.h
18709index a82c4f1..ac45053 100644
18710--- a/arch/x86/include/asm/reboot.h
18711+++ b/arch/x86/include/asm/reboot.h
18712@@ -6,13 +6,13 @@
18713 struct pt_regs;
18714
18715 struct machine_ops {
18716- void (*restart)(char *cmd);
18717- void (*halt)(void);
18718- void (*power_off)(void);
18719+ void (* __noreturn restart)(char *cmd);
18720+ void (* __noreturn halt)(void);
18721+ void (* __noreturn power_off)(void);
18722 void (*shutdown)(void);
18723 void (*crash_shutdown)(struct pt_regs *);
18724- void (*emergency_restart)(void);
18725-};
18726+ void (* __noreturn emergency_restart)(void);
18727+} __no_const;
18728
18729 extern struct machine_ops machine_ops;
18730
18731diff --git a/arch/x86/include/asm/rmwcc.h b/arch/x86/include/asm/rmwcc.h
18732index 8f7866a..e442f20 100644
18733--- a/arch/x86/include/asm/rmwcc.h
18734+++ b/arch/x86/include/asm/rmwcc.h
18735@@ -3,7 +3,34 @@
18736
18737 #ifdef CC_HAVE_ASM_GOTO
18738
18739-#define __GEN_RMWcc(fullop, var, cc, ...) \
18740+#ifdef CONFIG_PAX_REFCOUNT
18741+#define __GEN_RMWcc(fullop, fullantiop, var, cc, ...) \
18742+do { \
18743+ asm_volatile_goto (fullop \
18744+ ";jno 0f\n" \
18745+ fullantiop \
18746+ ";int $4\n0:\n" \
18747+ _ASM_EXTABLE(0b, 0b) \
18748+ ";j" cc " %l[cc_label]" \
18749+ : : "m" (var), ## __VA_ARGS__ \
18750+ : "memory" : cc_label); \
18751+ return 0; \
18752+cc_label: \
18753+ return 1; \
18754+} while (0)
18755+#else
18756+#define __GEN_RMWcc(fullop, fullantiop, var, cc, ...) \
18757+do { \
18758+ asm_volatile_goto (fullop ";j" cc " %l[cc_label]" \
18759+ : : "m" (var), ## __VA_ARGS__ \
18760+ : "memory" : cc_label); \
18761+ return 0; \
18762+cc_label: \
18763+ return 1; \
18764+} while (0)
18765+#endif
18766+
18767+#define __GEN_RMWcc_unchecked(fullop, var, cc, ...) \
18768 do { \
18769 asm_volatile_goto (fullop "; j" cc " %l[cc_label]" \
18770 : : "m" (var), ## __VA_ARGS__ \
18771@@ -13,15 +40,46 @@ cc_label: \
18772 return 1; \
18773 } while (0)
18774
18775-#define GEN_UNARY_RMWcc(op, var, arg0, cc) \
18776- __GEN_RMWcc(op " " arg0, var, cc)
18777+#define GEN_UNARY_RMWcc(op, antiop, var, arg0, cc) \
18778+ __GEN_RMWcc(op " " arg0, antiop " " arg0, var, cc)
18779
18780-#define GEN_BINARY_RMWcc(op, var, vcon, val, arg0, cc) \
18781- __GEN_RMWcc(op " %1, " arg0, var, cc, vcon (val))
18782+#define GEN_UNARY_RMWcc_unchecked(op, var, arg0, cc) \
18783+ __GEN_RMWcc_unchecked(op " " arg0, var, cc)
18784+
18785+#define GEN_BINARY_RMWcc(op, antiop, var, vcon, val, arg0, cc) \
18786+ __GEN_RMWcc(op " %1, " arg0, antiop " %1, " arg0, var, cc, vcon (val))
18787+
18788+#define GEN_BINARY_RMWcc_unchecked(op, var, vcon, val, arg0, cc) \
18789+ __GEN_RMWcc_unchecked(op " %1, " arg0, var, cc, vcon (val))
18790
18791 #else /* !CC_HAVE_ASM_GOTO */
18792
18793-#define __GEN_RMWcc(fullop, var, cc, ...) \
18794+#ifdef CONFIG_PAX_REFCOUNT
18795+#define __GEN_RMWcc(fullop, fullantiop, var, cc, ...) \
18796+do { \
18797+ char c; \
18798+ asm volatile (fullop \
18799+ ";jno 0f\n" \
18800+ fullantiop \
18801+ ";int $4\n0:\n" \
18802+ _ASM_EXTABLE(0b, 0b) \
18803+ "; set" cc " %1" \
18804+ : "+m" (var), "=qm" (c) \
18805+ : __VA_ARGS__ : "memory"); \
18806+ return c != 0; \
18807+} while (0)
18808+#else
18809+#define __GEN_RMWcc(fullop, fullantiop, var, cc, ...) \
18810+do { \
18811+ char c; \
18812+ asm volatile (fullop "; set" cc " %1" \
18813+ : "+m" (var), "=qm" (c) \
18814+ : __VA_ARGS__ : "memory"); \
18815+ return c != 0; \
18816+} while (0)
18817+#endif
18818+
18819+#define __GEN_RMWcc_unchecked(fullop, var, cc, ...) \
18820 do { \
18821 char c; \
18822 asm volatile (fullop "; set" cc " %1" \
18823@@ -30,11 +88,17 @@ do { \
18824 return c != 0; \
18825 } while (0)
18826
18827-#define GEN_UNARY_RMWcc(op, var, arg0, cc) \
18828- __GEN_RMWcc(op " " arg0, var, cc)
18829+#define GEN_UNARY_RMWcc(op, antiop, var, arg0, cc) \
18830+ __GEN_RMWcc(op " " arg0, antiop " " arg0, var, cc)
18831+
18832+#define GEN_UNARY_RMWcc_unchecked(op, var, arg0, cc) \
18833+ __GEN_RMWcc_unchecked(op " " arg0, var, cc)
18834+
18835+#define GEN_BINARY_RMWcc(op, antiop, var, vcon, val, arg0, cc) \
18836+ __GEN_RMWcc(op " %2, " arg0, antiop " %2, " arg0, var, cc, vcon (val))
18837
18838-#define GEN_BINARY_RMWcc(op, var, vcon, val, arg0, cc) \
18839- __GEN_RMWcc(op " %2, " arg0, var, cc, vcon (val))
18840+#define GEN_BINARY_RMWcc_unchecked(op, var, vcon, val, arg0, cc) \
18841+ __GEN_RMWcc_unchecked(op " %2, " arg0, var, cc, vcon (val))
18842
18843 #endif /* CC_HAVE_ASM_GOTO */
18844
18845diff --git a/arch/x86/include/asm/rwsem.h b/arch/x86/include/asm/rwsem.h
18846index cad82c9..2e5c5c1 100644
18847--- a/arch/x86/include/asm/rwsem.h
18848+++ b/arch/x86/include/asm/rwsem.h
18849@@ -64,6 +64,14 @@ static inline void __down_read(struct rw_semaphore *sem)
18850 {
18851 asm volatile("# beginning down_read\n\t"
18852 LOCK_PREFIX _ASM_INC "(%1)\n\t"
18853+
18854+#ifdef CONFIG_PAX_REFCOUNT
18855+ "jno 0f\n"
18856+ LOCK_PREFIX _ASM_DEC "(%1)\n"
18857+ "int $4\n0:\n"
18858+ _ASM_EXTABLE(0b, 0b)
18859+#endif
18860+
18861 /* adds 0x00000001 */
18862 " jns 1f\n"
18863 " call call_rwsem_down_read_failed\n"
18864@@ -85,6 +93,14 @@ static inline int __down_read_trylock(struct rw_semaphore *sem)
18865 "1:\n\t"
18866 " mov %1,%2\n\t"
18867 " add %3,%2\n\t"
18868+
18869+#ifdef CONFIG_PAX_REFCOUNT
18870+ "jno 0f\n"
18871+ "sub %3,%2\n"
18872+ "int $4\n0:\n"
18873+ _ASM_EXTABLE(0b, 0b)
18874+#endif
18875+
18876 " jle 2f\n\t"
18877 LOCK_PREFIX " cmpxchg %2,%0\n\t"
18878 " jnz 1b\n\t"
18879@@ -104,6 +120,14 @@ static inline void __down_write_nested(struct rw_semaphore *sem, int subclass)
18880 long tmp;
18881 asm volatile("# beginning down_write\n\t"
18882 LOCK_PREFIX " xadd %1,(%2)\n\t"
18883+
18884+#ifdef CONFIG_PAX_REFCOUNT
18885+ "jno 0f\n"
18886+ "mov %1,(%2)\n"
18887+ "int $4\n0:\n"
18888+ _ASM_EXTABLE(0b, 0b)
18889+#endif
18890+
18891 /* adds 0xffff0001, returns the old value */
18892 " test " __ASM_SEL(%w1,%k1) "," __ASM_SEL(%w1,%k1) "\n\t"
18893 /* was the active mask 0 before? */
18894@@ -155,6 +179,14 @@ static inline void __up_read(struct rw_semaphore *sem)
18895 long tmp;
18896 asm volatile("# beginning __up_read\n\t"
18897 LOCK_PREFIX " xadd %1,(%2)\n\t"
18898+
18899+#ifdef CONFIG_PAX_REFCOUNT
18900+ "jno 0f\n"
18901+ "mov %1,(%2)\n"
18902+ "int $4\n0:\n"
18903+ _ASM_EXTABLE(0b, 0b)
18904+#endif
18905+
18906 /* subtracts 1, returns the old value */
18907 " jns 1f\n\t"
18908 " call call_rwsem_wake\n" /* expects old value in %edx */
18909@@ -173,6 +205,14 @@ static inline void __up_write(struct rw_semaphore *sem)
18910 long tmp;
18911 asm volatile("# beginning __up_write\n\t"
18912 LOCK_PREFIX " xadd %1,(%2)\n\t"
18913+
18914+#ifdef CONFIG_PAX_REFCOUNT
18915+ "jno 0f\n"
18916+ "mov %1,(%2)\n"
18917+ "int $4\n0:\n"
18918+ _ASM_EXTABLE(0b, 0b)
18919+#endif
18920+
18921 /* subtracts 0xffff0001, returns the old value */
18922 " jns 1f\n\t"
18923 " call call_rwsem_wake\n" /* expects old value in %edx */
18924@@ -190,6 +230,14 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
18925 {
18926 asm volatile("# beginning __downgrade_write\n\t"
18927 LOCK_PREFIX _ASM_ADD "%2,(%1)\n\t"
18928+
18929+#ifdef CONFIG_PAX_REFCOUNT
18930+ "jno 0f\n"
18931+ LOCK_PREFIX _ASM_SUB "%2,(%1)\n"
18932+ "int $4\n0:\n"
18933+ _ASM_EXTABLE(0b, 0b)
18934+#endif
18935+
18936 /*
18937 * transitions 0xZZZZ0001 -> 0xYYYY0001 (i386)
18938 * 0xZZZZZZZZ00000001 -> 0xYYYYYYYY00000001 (x86_64)
18939@@ -208,7 +256,15 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
18940 */
18941 static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
18942 {
18943- asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0"
18944+ asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0\n"
18945+
18946+#ifdef CONFIG_PAX_REFCOUNT
18947+ "jno 0f\n"
18948+ LOCK_PREFIX _ASM_SUB "%1,%0\n"
18949+ "int $4\n0:\n"
18950+ _ASM_EXTABLE(0b, 0b)
18951+#endif
18952+
18953 : "+m" (sem->count)
18954 : "er" (delta));
18955 }
18956@@ -218,7 +274,7 @@ static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
18957 */
18958 static inline long rwsem_atomic_update(long delta, struct rw_semaphore *sem)
18959 {
18960- return delta + xadd(&sem->count, delta);
18961+ return delta + xadd_check_overflow(&sem->count, delta);
18962 }
18963
18964 #endif /* __KERNEL__ */
18965diff --git a/arch/x86/include/asm/segment.h b/arch/x86/include/asm/segment.h
18966index db257a5..b91bc77 100644
18967--- a/arch/x86/include/asm/segment.h
18968+++ b/arch/x86/include/asm/segment.h
18969@@ -73,10 +73,15 @@
18970 * 26 - ESPFIX small SS
18971 * 27 - per-cpu [ offset to per-cpu data area ]
18972 * 28 - stack_canary-20 [ for stack protector ]
18973- * 29 - unused
18974- * 30 - unused
18975+ * 29 - PCI BIOS CS
18976+ * 30 - PCI BIOS DS
18977 * 31 - TSS for double fault handler
18978 */
18979+#define GDT_ENTRY_KERNEXEC_EFI_CS (1)
18980+#define GDT_ENTRY_KERNEXEC_EFI_DS (2)
18981+#define __KERNEXEC_EFI_CS (GDT_ENTRY_KERNEXEC_EFI_CS*8)
18982+#define __KERNEXEC_EFI_DS (GDT_ENTRY_KERNEXEC_EFI_DS*8)
18983+
18984 #define GDT_ENTRY_TLS_MIN 6
18985 #define GDT_ENTRY_TLS_MAX (GDT_ENTRY_TLS_MIN + GDT_ENTRY_TLS_ENTRIES - 1)
18986
18987@@ -88,6 +93,8 @@
18988
18989 #define GDT_ENTRY_KERNEL_CS (GDT_ENTRY_KERNEL_BASE+0)
18990
18991+#define GDT_ENTRY_KERNEXEC_KERNEL_CS (4)
18992+
18993 #define GDT_ENTRY_KERNEL_DS (GDT_ENTRY_KERNEL_BASE+1)
18994
18995 #define GDT_ENTRY_TSS (GDT_ENTRY_KERNEL_BASE+4)
18996@@ -113,6 +120,12 @@
18997 #define __KERNEL_STACK_CANARY 0
18998 #endif
18999
19000+#define GDT_ENTRY_PCIBIOS_CS (GDT_ENTRY_KERNEL_BASE+17)
19001+#define __PCIBIOS_CS (GDT_ENTRY_PCIBIOS_CS * 8)
19002+
19003+#define GDT_ENTRY_PCIBIOS_DS (GDT_ENTRY_KERNEL_BASE+18)
19004+#define __PCIBIOS_DS (GDT_ENTRY_PCIBIOS_DS * 8)
19005+
19006 #define GDT_ENTRY_DOUBLEFAULT_TSS 31
19007
19008 /*
19009@@ -140,7 +153,7 @@
19010 */
19011
19012 /* Matches PNP_CS32 and PNP_CS16 (they must be consecutive) */
19013-#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xf4) == GDT_ENTRY_PNPBIOS_BASE * 8)
19014+#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xFFFCU) == PNP_CS32 || ((x) & 0xFFFCU) == PNP_CS16)
19015
19016
19017 #else
19018@@ -164,6 +177,8 @@
19019 #define __USER32_CS (GDT_ENTRY_DEFAULT_USER32_CS*8+3)
19020 #define __USER32_DS __USER_DS
19021
19022+#define GDT_ENTRY_KERNEXEC_KERNEL_CS 7
19023+
19024 #define GDT_ENTRY_TSS 8 /* needs two entries */
19025 #define GDT_ENTRY_LDT 10 /* needs two entries */
19026 #define GDT_ENTRY_TLS_MIN 12
19027@@ -172,6 +187,8 @@
19028 #define GDT_ENTRY_PER_CPU 15 /* Abused to load per CPU data from limit */
19029 #define __PER_CPU_SEG (GDT_ENTRY_PER_CPU * 8 + 3)
19030
19031+#define GDT_ENTRY_UDEREF_KERNEL_DS 16
19032+
19033 /* TLS indexes for 64bit - hardcoded in arch_prctl */
19034 #define FS_TLS 0
19035 #define GS_TLS 1
19036@@ -179,12 +196,14 @@
19037 #define GS_TLS_SEL ((GDT_ENTRY_TLS_MIN+GS_TLS)*8 + 3)
19038 #define FS_TLS_SEL ((GDT_ENTRY_TLS_MIN+FS_TLS)*8 + 3)
19039
19040-#define GDT_ENTRIES 16
19041+#define GDT_ENTRIES 17
19042
19043 #endif
19044
19045 #define __KERNEL_CS (GDT_ENTRY_KERNEL_CS*8)
19046+#define __KERNEXEC_KERNEL_CS (GDT_ENTRY_KERNEXEC_KERNEL_CS*8)
19047 #define __KERNEL_DS (GDT_ENTRY_KERNEL_DS*8)
19048+#define __UDEREF_KERNEL_DS (GDT_ENTRY_UDEREF_KERNEL_DS*8)
19049 #define __USER_DS (GDT_ENTRY_DEFAULT_USER_DS*8+3)
19050 #define __USER_CS (GDT_ENTRY_DEFAULT_USER_CS*8+3)
19051 #ifndef CONFIG_PARAVIRT
19052@@ -256,7 +275,7 @@ static inline unsigned long get_limit(unsigned long segment)
19053 {
19054 unsigned long __limit;
19055 asm("lsll %1,%0" : "=r" (__limit) : "r" (segment));
19056- return __limit + 1;
19057+ return __limit;
19058 }
19059
19060 #endif /* !__ASSEMBLY__ */
19061diff --git a/arch/x86/include/asm/smap.h b/arch/x86/include/asm/smap.h
19062index 8d3120f..352b440 100644
19063--- a/arch/x86/include/asm/smap.h
19064+++ b/arch/x86/include/asm/smap.h
19065@@ -25,11 +25,40 @@
19066
19067 #include <asm/alternative-asm.h>
19068
19069+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19070+#define ASM_PAX_OPEN_USERLAND \
19071+ 661: jmp 663f; \
19072+ .pushsection .altinstr_replacement, "a" ; \
19073+ 662: pushq %rax; nop; \
19074+ .popsection ; \
19075+ .pushsection .altinstructions, "a" ; \
19076+ altinstruction_entry 661b, 662b, X86_FEATURE_STRONGUDEREF, 2, 2;\
19077+ .popsection ; \
19078+ call __pax_open_userland; \
19079+ popq %rax; \
19080+ 663:
19081+
19082+#define ASM_PAX_CLOSE_USERLAND \
19083+ 661: jmp 663f; \
19084+ .pushsection .altinstr_replacement, "a" ; \
19085+ 662: pushq %rax; nop; \
19086+ .popsection; \
19087+ .pushsection .altinstructions, "a" ; \
19088+ altinstruction_entry 661b, 662b, X86_FEATURE_STRONGUDEREF, 2, 2;\
19089+ .popsection; \
19090+ call __pax_close_userland; \
19091+ popq %rax; \
19092+ 663:
19093+#else
19094+#define ASM_PAX_OPEN_USERLAND
19095+#define ASM_PAX_CLOSE_USERLAND
19096+#endif
19097+
19098 #ifdef CONFIG_X86_SMAP
19099
19100 #define ASM_CLAC \
19101 661: ASM_NOP3 ; \
19102- .pushsection .altinstr_replacement, "ax" ; \
19103+ .pushsection .altinstr_replacement, "a" ; \
19104 662: __ASM_CLAC ; \
19105 .popsection ; \
19106 .pushsection .altinstructions, "a" ; \
19107@@ -38,7 +67,7 @@
19108
19109 #define ASM_STAC \
19110 661: ASM_NOP3 ; \
19111- .pushsection .altinstr_replacement, "ax" ; \
19112+ .pushsection .altinstr_replacement, "a" ; \
19113 662: __ASM_STAC ; \
19114 .popsection ; \
19115 .pushsection .altinstructions, "a" ; \
19116@@ -56,6 +85,37 @@
19117
19118 #include <asm/alternative.h>
19119
19120+#define __HAVE_ARCH_PAX_OPEN_USERLAND
19121+#define __HAVE_ARCH_PAX_CLOSE_USERLAND
19122+
19123+extern void __pax_open_userland(void);
19124+static __always_inline unsigned long pax_open_userland(void)
19125+{
19126+
19127+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19128+ asm volatile(ALTERNATIVE(ASM_NOP5, "call %P[open]", X86_FEATURE_STRONGUDEREF)
19129+ :
19130+ : [open] "i" (__pax_open_userland)
19131+ : "memory", "rax");
19132+#endif
19133+
19134+ return 0;
19135+}
19136+
19137+extern void __pax_close_userland(void);
19138+static __always_inline unsigned long pax_close_userland(void)
19139+{
19140+
19141+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19142+ asm volatile(ALTERNATIVE(ASM_NOP5, "call %P[close]", X86_FEATURE_STRONGUDEREF)
19143+ :
19144+ : [close] "i" (__pax_close_userland)
19145+ : "memory", "rax");
19146+#endif
19147+
19148+ return 0;
19149+}
19150+
19151 #ifdef CONFIG_X86_SMAP
19152
19153 static __always_inline void clac(void)
19154diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h
19155index 8cd1cc3..827e09e 100644
19156--- a/arch/x86/include/asm/smp.h
19157+++ b/arch/x86/include/asm/smp.h
19158@@ -35,7 +35,7 @@ DECLARE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_core_map);
19159 /* cpus sharing the last level cache: */
19160 DECLARE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_llc_shared_map);
19161 DECLARE_PER_CPU_READ_MOSTLY(u16, cpu_llc_id);
19162-DECLARE_PER_CPU_READ_MOSTLY(int, cpu_number);
19163+DECLARE_PER_CPU_READ_MOSTLY(unsigned int, cpu_number);
19164
19165 static inline struct cpumask *cpu_sibling_mask(int cpu)
19166 {
19167@@ -78,7 +78,7 @@ struct smp_ops {
19168
19169 void (*send_call_func_ipi)(const struct cpumask *mask);
19170 void (*send_call_func_single_ipi)(int cpu);
19171-};
19172+} __no_const;
19173
19174 /* Globals due to paravirt */
19175 extern void set_cpu_sibling_map(int cpu);
19176@@ -191,14 +191,8 @@ extern unsigned disabled_cpus;
19177 extern int safe_smp_processor_id(void);
19178
19179 #elif defined(CONFIG_X86_64_SMP)
19180-#define raw_smp_processor_id() (this_cpu_read(cpu_number))
19181-
19182-#define stack_smp_processor_id() \
19183-({ \
19184- struct thread_info *ti; \
19185- __asm__("andq %%rsp,%0; ":"=r" (ti) : "0" (CURRENT_MASK)); \
19186- ti->cpu; \
19187-})
19188+#define raw_smp_processor_id() (this_cpu_read(cpu_number))
19189+#define stack_smp_processor_id() raw_smp_processor_id()
19190 #define safe_smp_processor_id() smp_processor_id()
19191
19192 #endif
19193diff --git a/arch/x86/include/asm/stackprotector.h b/arch/x86/include/asm/stackprotector.h
19194index 6a99859..03cb807 100644
19195--- a/arch/x86/include/asm/stackprotector.h
19196+++ b/arch/x86/include/asm/stackprotector.h
19197@@ -47,7 +47,7 @@
19198 * head_32 for boot CPU and setup_per_cpu_areas() for others.
19199 */
19200 #define GDT_STACK_CANARY_INIT \
19201- [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x18),
19202+ [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x17),
19203
19204 /*
19205 * Initialize the stackprotector canary value.
19206@@ -112,7 +112,7 @@ static inline void setup_stack_canary_segment(int cpu)
19207
19208 static inline void load_stack_canary_segment(void)
19209 {
19210-#ifdef CONFIG_X86_32
19211+#if defined(CONFIG_X86_32) && !defined(CONFIG_PAX_MEMORY_UDEREF)
19212 asm volatile ("mov %0, %%gs" : : "r" (0));
19213 #endif
19214 }
19215diff --git a/arch/x86/include/asm/stacktrace.h b/arch/x86/include/asm/stacktrace.h
19216index 70bbe39..4ae2bd4 100644
19217--- a/arch/x86/include/asm/stacktrace.h
19218+++ b/arch/x86/include/asm/stacktrace.h
19219@@ -11,28 +11,20 @@
19220
19221 extern int kstack_depth_to_print;
19222
19223-struct thread_info;
19224+struct task_struct;
19225 struct stacktrace_ops;
19226
19227-typedef unsigned long (*walk_stack_t)(struct thread_info *tinfo,
19228- unsigned long *stack,
19229- unsigned long bp,
19230- const struct stacktrace_ops *ops,
19231- void *data,
19232- unsigned long *end,
19233- int *graph);
19234+typedef unsigned long walk_stack_t(struct task_struct *task,
19235+ void *stack_start,
19236+ unsigned long *stack,
19237+ unsigned long bp,
19238+ const struct stacktrace_ops *ops,
19239+ void *data,
19240+ unsigned long *end,
19241+ int *graph);
19242
19243-extern unsigned long
19244-print_context_stack(struct thread_info *tinfo,
19245- unsigned long *stack, unsigned long bp,
19246- const struct stacktrace_ops *ops, void *data,
19247- unsigned long *end, int *graph);
19248-
19249-extern unsigned long
19250-print_context_stack_bp(struct thread_info *tinfo,
19251- unsigned long *stack, unsigned long bp,
19252- const struct stacktrace_ops *ops, void *data,
19253- unsigned long *end, int *graph);
19254+extern walk_stack_t print_context_stack;
19255+extern walk_stack_t print_context_stack_bp;
19256
19257 /* Generic stack tracer with callbacks */
19258
19259@@ -40,7 +32,7 @@ struct stacktrace_ops {
19260 void (*address)(void *data, unsigned long address, int reliable);
19261 /* On negative return stop dumping */
19262 int (*stack)(void *data, char *name);
19263- walk_stack_t walk_stack;
19264+ walk_stack_t *walk_stack;
19265 };
19266
19267 void dump_trace(struct task_struct *tsk, struct pt_regs *regs,
19268diff --git a/arch/x86/include/asm/switch_to.h b/arch/x86/include/asm/switch_to.h
19269index 751bf4b..a1278b5 100644
19270--- a/arch/x86/include/asm/switch_to.h
19271+++ b/arch/x86/include/asm/switch_to.h
19272@@ -112,7 +112,7 @@ do { \
19273 "call __switch_to\n\t" \
19274 "movq "__percpu_arg([current_task])",%%rsi\n\t" \
19275 __switch_canary \
19276- "movq %P[thread_info](%%rsi),%%r8\n\t" \
19277+ "movq "__percpu_arg([thread_info])",%%r8\n\t" \
19278 "movq %%rax,%%rdi\n\t" \
19279 "testl %[_tif_fork],%P[ti_flags](%%r8)\n\t" \
19280 "jnz ret_from_fork\n\t" \
19281@@ -123,7 +123,7 @@ do { \
19282 [threadrsp] "i" (offsetof(struct task_struct, thread.sp)), \
19283 [ti_flags] "i" (offsetof(struct thread_info, flags)), \
19284 [_tif_fork] "i" (_TIF_FORK), \
19285- [thread_info] "i" (offsetof(struct task_struct, stack)), \
19286+ [thread_info] "m" (current_tinfo), \
19287 [current_task] "m" (current_task) \
19288 __switch_canary_iparam \
19289 : "memory", "cc" __EXTRA_CLOBBER)
19290diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h
19291index 1d4e4f2..506db18 100644
19292--- a/arch/x86/include/asm/thread_info.h
19293+++ b/arch/x86/include/asm/thread_info.h
19294@@ -24,7 +24,6 @@ struct exec_domain;
19295 #include <linux/atomic.h>
19296
19297 struct thread_info {
19298- struct task_struct *task; /* main task structure */
19299 struct exec_domain *exec_domain; /* execution domain */
19300 __u32 flags; /* low level flags */
19301 __u32 status; /* thread synchronous flags */
19302@@ -32,13 +31,13 @@ struct thread_info {
19303 int saved_preempt_count;
19304 mm_segment_t addr_limit;
19305 void __user *sysenter_return;
19306+ unsigned long lowest_stack;
19307 unsigned int sig_on_uaccess_error:1;
19308 unsigned int uaccess_err:1; /* uaccess failed */
19309 };
19310
19311-#define INIT_THREAD_INFO(tsk) \
19312+#define INIT_THREAD_INFO \
19313 { \
19314- .task = &tsk, \
19315 .exec_domain = &default_exec_domain, \
19316 .flags = 0, \
19317 .cpu = 0, \
19318@@ -46,7 +45,7 @@ struct thread_info {
19319 .addr_limit = KERNEL_DS, \
19320 }
19321
19322-#define init_thread_info (init_thread_union.thread_info)
19323+#define init_thread_info (init_thread_union.stack)
19324 #define init_stack (init_thread_union.stack)
19325
19326 #else /* !__ASSEMBLY__ */
19327@@ -86,6 +85,7 @@ struct thread_info {
19328 #define TIF_SYSCALL_TRACEPOINT 28 /* syscall tracepoint instrumentation */
19329 #define TIF_ADDR32 29 /* 32-bit address space on 64 bits */
19330 #define TIF_X32 30 /* 32-bit native x86-64 binary */
19331+#define TIF_GRSEC_SETXID 31 /* update credentials on syscall entry/exit */
19332
19333 #define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE)
19334 #define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME)
19335@@ -109,17 +109,18 @@ struct thread_info {
19336 #define _TIF_SYSCALL_TRACEPOINT (1 << TIF_SYSCALL_TRACEPOINT)
19337 #define _TIF_ADDR32 (1 << TIF_ADDR32)
19338 #define _TIF_X32 (1 << TIF_X32)
19339+#define _TIF_GRSEC_SETXID (1 << TIF_GRSEC_SETXID)
19340
19341 /* work to do in syscall_trace_enter() */
19342 #define _TIF_WORK_SYSCALL_ENTRY \
19343 (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_EMU | _TIF_SYSCALL_AUDIT | \
19344 _TIF_SECCOMP | _TIF_SINGLESTEP | _TIF_SYSCALL_TRACEPOINT | \
19345- _TIF_NOHZ)
19346+ _TIF_NOHZ | _TIF_GRSEC_SETXID)
19347
19348 /* work to do in syscall_trace_leave() */
19349 #define _TIF_WORK_SYSCALL_EXIT \
19350 (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_SINGLESTEP | \
19351- _TIF_SYSCALL_TRACEPOINT | _TIF_NOHZ)
19352+ _TIF_SYSCALL_TRACEPOINT | _TIF_NOHZ | _TIF_GRSEC_SETXID)
19353
19354 /* work to do on interrupt/exception return */
19355 #define _TIF_WORK_MASK \
19356@@ -130,7 +131,7 @@ struct thread_info {
19357 /* work to do on any return to user space */
19358 #define _TIF_ALLWORK_MASK \
19359 ((0x0000FFFF & ~_TIF_SECCOMP) | _TIF_SYSCALL_TRACEPOINT | \
19360- _TIF_NOHZ)
19361+ _TIF_NOHZ | _TIF_GRSEC_SETXID)
19362
19363 /* Only used for 64 bit */
19364 #define _TIF_DO_NOTIFY_MASK \
19365@@ -145,7 +146,6 @@ struct thread_info {
19366 #define _TIF_WORK_CTXSW_NEXT (_TIF_WORK_CTXSW)
19367
19368 #define STACK_WARN (THREAD_SIZE/8)
19369-#define KERNEL_STACK_OFFSET (5*(BITS_PER_LONG/8))
19370
19371 /*
19372 * macros/functions for gaining access to the thread information structure
19373@@ -156,12 +156,11 @@ struct thread_info {
19374
19375 DECLARE_PER_CPU(unsigned long, kernel_stack);
19376
19377+DECLARE_PER_CPU(struct thread_info *, current_tinfo);
19378+
19379 static inline struct thread_info *current_thread_info(void)
19380 {
19381- struct thread_info *ti;
19382- ti = (void *)(this_cpu_read_stable(kernel_stack) +
19383- KERNEL_STACK_OFFSET - THREAD_SIZE);
19384- return ti;
19385+ return this_cpu_read_stable(current_tinfo);
19386 }
19387
19388 static inline unsigned long current_stack_pointer(void)
19389@@ -179,14 +178,7 @@ static inline unsigned long current_stack_pointer(void)
19390
19391 /* how to get the thread information struct from ASM */
19392 #define GET_THREAD_INFO(reg) \
19393- _ASM_MOV PER_CPU_VAR(kernel_stack),reg ; \
19394- _ASM_SUB $(THREAD_SIZE-KERNEL_STACK_OFFSET),reg ;
19395-
19396-/*
19397- * Same if PER_CPU_VAR(kernel_stack) is, perhaps with some offset, already in
19398- * a certain register (to be used in assembler memory operands).
19399- */
19400-#define THREAD_INFO(reg, off) KERNEL_STACK_OFFSET+(off)-THREAD_SIZE(reg)
19401+ _ASM_MOV PER_CPU_VAR(current_tinfo),reg ;
19402
19403 #endif
19404
19405@@ -242,5 +234,12 @@ static inline bool is_ia32_task(void)
19406 extern void arch_task_cache_init(void);
19407 extern int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src);
19408 extern void arch_release_task_struct(struct task_struct *tsk);
19409+
19410+#define __HAVE_THREAD_FUNCTIONS
19411+#define task_thread_info(task) (&(task)->tinfo)
19412+#define task_stack_page(task) ((task)->stack)
19413+#define setup_thread_stack(p, org) do {} while (0)
19414+#define end_of_stack(p) ((unsigned long *)task_stack_page(p) + 1)
19415+
19416 #endif
19417 #endif /* _ASM_X86_THREAD_INFO_H */
19418diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h
19419index cd79194..e7a9491 100644
19420--- a/arch/x86/include/asm/tlbflush.h
19421+++ b/arch/x86/include/asm/tlbflush.h
19422@@ -86,18 +86,44 @@ static inline void cr4_set_bits_and_update_boot(unsigned long mask)
19423
19424 static inline void __native_flush_tlb(void)
19425 {
19426+ if (static_cpu_has(X86_FEATURE_INVPCID)) {
19427+ u64 descriptor[2];
19428+
19429+ descriptor[0] = PCID_KERNEL;
19430+ asm volatile(__ASM_INVPCID : : "d"(&descriptor), "a"(INVPCID_ALL_NONGLOBAL) : "memory");
19431+ return;
19432+ }
19433+
19434+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19435+ if (static_cpu_has(X86_FEATURE_PCID)) {
19436+ unsigned int cpu = raw_get_cpu();
19437+
19438+ native_write_cr3(__pa(get_cpu_pgd(cpu, user)) | PCID_USER);
19439+ native_write_cr3(__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL);
19440+ raw_put_cpu_no_resched();
19441+ return;
19442+ }
19443+#endif
19444+
19445 native_write_cr3(native_read_cr3());
19446 }
19447
19448 static inline void __native_flush_tlb_global_irq_disabled(void)
19449 {
19450- unsigned long cr4;
19451+ if (static_cpu_has(X86_FEATURE_INVPCID)) {
19452+ u64 descriptor[2];
19453
19454- cr4 = this_cpu_read(cpu_tlbstate.cr4);
19455- /* clear PGE */
19456- native_write_cr4(cr4 & ~X86_CR4_PGE);
19457- /* write old PGE again and flush TLBs */
19458- native_write_cr4(cr4);
19459+ descriptor[0] = PCID_KERNEL;
19460+ asm volatile(__ASM_INVPCID : : "d"(&descriptor), "a"(INVPCID_ALL_GLOBAL) : "memory");
19461+ } else {
19462+ unsigned long cr4;
19463+
19464+ cr4 = this_cpu_read(cpu_tlbstate.cr4);
19465+ /* clear PGE */
19466+ native_write_cr4(cr4 & ~X86_CR4_PGE);
19467+ /* write old PGE again and flush TLBs */
19468+ native_write_cr4(cr4);
19469+ }
19470 }
19471
19472 static inline void __native_flush_tlb_global(void)
19473@@ -118,6 +144,41 @@ static inline void __native_flush_tlb_global(void)
19474
19475 static inline void __native_flush_tlb_single(unsigned long addr)
19476 {
19477+ if (static_cpu_has(X86_FEATURE_INVPCID)) {
19478+ u64 descriptor[2];
19479+
19480+ descriptor[0] = PCID_KERNEL;
19481+ descriptor[1] = addr;
19482+
19483+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19484+ if (!static_cpu_has(X86_FEATURE_STRONGUDEREF) || addr >= TASK_SIZE_MAX) {
19485+ if (addr < TASK_SIZE_MAX)
19486+ descriptor[1] += pax_user_shadow_base;
19487+ asm volatile(__ASM_INVPCID : : "d"(&descriptor), "a"(INVPCID_SINGLE_ADDRESS) : "memory");
19488+ }
19489+
19490+ descriptor[0] = PCID_USER;
19491+ descriptor[1] = addr;
19492+#endif
19493+
19494+ asm volatile(__ASM_INVPCID : : "d"(&descriptor), "a"(INVPCID_SINGLE_ADDRESS) : "memory");
19495+ return;
19496+ }
19497+
19498+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19499+ if (static_cpu_has(X86_FEATURE_PCID)) {
19500+ unsigned int cpu = raw_get_cpu();
19501+
19502+ native_write_cr3(__pa(get_cpu_pgd(cpu, user)) | PCID_USER | PCID_NOFLUSH);
19503+ asm volatile("invlpg (%0)" ::"r" (addr) : "memory");
19504+ native_write_cr3(__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL | PCID_NOFLUSH);
19505+ raw_put_cpu_no_resched();
19506+
19507+ if (!static_cpu_has(X86_FEATURE_STRONGUDEREF) && addr < TASK_SIZE_MAX)
19508+ addr += pax_user_shadow_base;
19509+ }
19510+#endif
19511+
19512 asm volatile("invlpg (%0)" ::"r" (addr) : "memory");
19513 }
19514
19515diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
19516index ace9dec..3f9e253 100644
19517--- a/arch/x86/include/asm/uaccess.h
19518+++ b/arch/x86/include/asm/uaccess.h
19519@@ -7,6 +7,7 @@
19520 #include <linux/compiler.h>
19521 #include <linux/thread_info.h>
19522 #include <linux/string.h>
19523+#include <linux/spinlock.h>
19524 #include <asm/asm.h>
19525 #include <asm/page.h>
19526 #include <asm/smap.h>
19527@@ -29,7 +30,12 @@
19528
19529 #define get_ds() (KERNEL_DS)
19530 #define get_fs() (current_thread_info()->addr_limit)
19531+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
19532+void __set_fs(mm_segment_t x);
19533+void set_fs(mm_segment_t x);
19534+#else
19535 #define set_fs(x) (current_thread_info()->addr_limit = (x))
19536+#endif
19537
19538 #define segment_eq(a, b) ((a).seg == (b).seg)
19539
19540@@ -85,8 +91,36 @@ static inline bool __chk_range_not_ok(unsigned long addr, unsigned long size, un
19541 * checks that the pointer is in the user space range - after calling
19542 * this function, memory access functions may still return -EFAULT.
19543 */
19544-#define access_ok(type, addr, size) \
19545- likely(!__range_not_ok(addr, size, user_addr_max()))
19546+extern int _cond_resched(void);
19547+#define access_ok_noprefault(type, addr, size) (likely(!__range_not_ok(addr, size, user_addr_max())))
19548+#define access_ok(type, addr, size) \
19549+({ \
19550+ unsigned long __size = size; \
19551+ unsigned long __addr = (unsigned long)addr; \
19552+ bool __ret_ao = __range_not_ok(__addr, __size, user_addr_max()) == 0;\
19553+ if (__ret_ao && __size) { \
19554+ unsigned long __addr_ao = __addr & PAGE_MASK; \
19555+ unsigned long __end_ao = __addr + __size - 1; \
19556+ if (unlikely((__end_ao ^ __addr_ao) & PAGE_MASK)) { \
19557+ while (__addr_ao <= __end_ao) { \
19558+ char __c_ao; \
19559+ __addr_ao += PAGE_SIZE; \
19560+ if (__size > PAGE_SIZE) \
19561+ _cond_resched(); \
19562+ if (__get_user(__c_ao, (char __user *)__addr)) \
19563+ break; \
19564+ if (type != VERIFY_WRITE) { \
19565+ __addr = __addr_ao; \
19566+ continue; \
19567+ } \
19568+ if (__put_user(__c_ao, (char __user *)__addr)) \
19569+ break; \
19570+ __addr = __addr_ao; \
19571+ } \
19572+ } \
19573+ } \
19574+ __ret_ao; \
19575+})
19576
19577 /*
19578 * The exception table consists of pairs of addresses relative to the
19579@@ -134,11 +168,13 @@ extern int __get_user_8(void);
19580 extern int __get_user_bad(void);
19581
19582 /*
19583- * This is a type: either unsigned long, if the argument fits into
19584- * that type, or otherwise unsigned long long.
19585+ * This is a type: either (un)signed int, if the argument fits into
19586+ * that type, or otherwise (un)signed long long.
19587 */
19588 #define __inttype(x) \
19589-__typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))
19590+__typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0U), \
19591+ __builtin_choose_expr(__type_is_unsigned(__typeof__(x)), 0ULL, 0LL),\
19592+ __builtin_choose_expr(__type_is_unsigned(__typeof__(x)), 0U, 0)))
19593
19594 /**
19595 * get_user: - Get a simple variable from user space.
19596@@ -176,10 +212,12 @@ __typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))
19597 register __inttype(*(ptr)) __val_gu asm("%"_ASM_DX); \
19598 __chk_user_ptr(ptr); \
19599 might_fault(); \
19600+ pax_open_userland(); \
19601 asm volatile("call __get_user_%P3" \
19602 : "=a" (__ret_gu), "=r" (__val_gu) \
19603 : "0" (ptr), "i" (sizeof(*(ptr)))); \
19604 (x) = (__force __typeof__(*(ptr))) __val_gu; \
19605+ pax_close_userland(); \
19606 __ret_gu; \
19607 })
19608
19609@@ -187,13 +225,21 @@ __typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))
19610 asm volatile("call __put_user_" #size : "=a" (__ret_pu) \
19611 : "0" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")
19612
19613-
19614+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
19615+#define __copyuser_seg "gs;"
19616+#define __COPYUSER_SET_ES "pushl %%gs; popl %%es\n"
19617+#define __COPYUSER_RESTORE_ES "pushl %%ss; popl %%es\n"
19618+#else
19619+#define __copyuser_seg
19620+#define __COPYUSER_SET_ES
19621+#define __COPYUSER_RESTORE_ES
19622+#endif
19623
19624 #ifdef CONFIG_X86_32
19625 #define __put_user_asm_u64(x, addr, err, errret) \
19626 asm volatile(ASM_STAC "\n" \
19627- "1: movl %%eax,0(%2)\n" \
19628- "2: movl %%edx,4(%2)\n" \
19629+ "1: "__copyuser_seg"movl %%eax,0(%2)\n" \
19630+ "2: "__copyuser_seg"movl %%edx,4(%2)\n" \
19631 "3: " ASM_CLAC "\n" \
19632 ".section .fixup,\"ax\"\n" \
19633 "4: movl %3,%0\n" \
19634@@ -206,8 +252,8 @@ __typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))
19635
19636 #define __put_user_asm_ex_u64(x, addr) \
19637 asm volatile(ASM_STAC "\n" \
19638- "1: movl %%eax,0(%1)\n" \
19639- "2: movl %%edx,4(%1)\n" \
19640+ "1: "__copyuser_seg"movl %%eax,0(%1)\n" \
19641+ "2: "__copyuser_seg"movl %%edx,4(%1)\n" \
19642 "3: " ASM_CLAC "\n" \
19643 _ASM_EXTABLE_EX(1b, 2b) \
19644 _ASM_EXTABLE_EX(2b, 3b) \
19645@@ -257,7 +303,8 @@ extern void __put_user_8(void);
19646 __typeof__(*(ptr)) __pu_val; \
19647 __chk_user_ptr(ptr); \
19648 might_fault(); \
19649- __pu_val = x; \
19650+ __pu_val = (x); \
19651+ pax_open_userland(); \
19652 switch (sizeof(*(ptr))) { \
19653 case 1: \
19654 __put_user_x(1, __pu_val, ptr, __ret_pu); \
19655@@ -275,6 +322,7 @@ extern void __put_user_8(void);
19656 __put_user_x(X, __pu_val, ptr, __ret_pu); \
19657 break; \
19658 } \
19659+ pax_close_userland(); \
19660 __ret_pu; \
19661 })
19662
19663@@ -355,8 +403,10 @@ do { \
19664 } while (0)
19665
19666 #define __get_user_asm(x, addr, err, itype, rtype, ltype, errret) \
19667+do { \
19668+ pax_open_userland(); \
19669 asm volatile(ASM_STAC "\n" \
19670- "1: mov"itype" %2,%"rtype"1\n" \
19671+ "1: "__copyuser_seg"mov"itype" %2,%"rtype"1\n"\
19672 "2: " ASM_CLAC "\n" \
19673 ".section .fixup,\"ax\"\n" \
19674 "3: mov %3,%0\n" \
19675@@ -364,8 +414,10 @@ do { \
19676 " jmp 2b\n" \
19677 ".previous\n" \
19678 _ASM_EXTABLE(1b, 3b) \
19679- : "=r" (err), ltype(x) \
19680- : "m" (__m(addr)), "i" (errret), "0" (err))
19681+ : "=r" (err), ltype (x) \
19682+ : "m" (__m(addr)), "i" (errret), "0" (err)); \
19683+ pax_close_userland(); \
19684+} while (0)
19685
19686 #define __get_user_size_ex(x, ptr, size) \
19687 do { \
19688@@ -389,7 +441,7 @@ do { \
19689 } while (0)
19690
19691 #define __get_user_asm_ex(x, addr, itype, rtype, ltype) \
19692- asm volatile("1: mov"itype" %1,%"rtype"0\n" \
19693+ asm volatile("1: "__copyuser_seg"mov"itype" %1,%"rtype"0\n"\
19694 "2:\n" \
19695 _ASM_EXTABLE_EX(1b, 2b) \
19696 : ltype(x) : "m" (__m(addr)))
19697@@ -406,13 +458,24 @@ do { \
19698 int __gu_err; \
19699 unsigned long __gu_val; \
19700 __get_user_size(__gu_val, (ptr), (size), __gu_err, -EFAULT); \
19701- (x) = (__force __typeof__(*(ptr)))__gu_val; \
19702+ (x) = (__typeof__(*(ptr)))__gu_val; \
19703 __gu_err; \
19704 })
19705
19706 /* FIXME: this hack is definitely wrong -AK */
19707 struct __large_struct { unsigned long buf[100]; };
19708-#define __m(x) (*(struct __large_struct __user *)(x))
19709+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19710+#define ____m(x) \
19711+({ \
19712+ unsigned long ____x = (unsigned long)(x); \
19713+ if (____x < pax_user_shadow_base) \
19714+ ____x += pax_user_shadow_base; \
19715+ (typeof(x))____x; \
19716+})
19717+#else
19718+#define ____m(x) (x)
19719+#endif
19720+#define __m(x) (*(struct __large_struct __user *)____m(x))
19721
19722 /*
19723 * Tell gcc we read from memory instead of writing: this is because
19724@@ -420,8 +483,10 @@ struct __large_struct { unsigned long buf[100]; };
19725 * aliasing issues.
19726 */
19727 #define __put_user_asm(x, addr, err, itype, rtype, ltype, errret) \
19728+do { \
19729+ pax_open_userland(); \
19730 asm volatile(ASM_STAC "\n" \
19731- "1: mov"itype" %"rtype"1,%2\n" \
19732+ "1: "__copyuser_seg"mov"itype" %"rtype"1,%2\n"\
19733 "2: " ASM_CLAC "\n" \
19734 ".section .fixup,\"ax\"\n" \
19735 "3: mov %3,%0\n" \
19736@@ -429,10 +494,12 @@ struct __large_struct { unsigned long buf[100]; };
19737 ".previous\n" \
19738 _ASM_EXTABLE(1b, 3b) \
19739 : "=r"(err) \
19740- : ltype(x), "m" (__m(addr)), "i" (errret), "0" (err))
19741+ : ltype (x), "m" (__m(addr)), "i" (errret), "0" (err));\
19742+ pax_close_userland(); \
19743+} while (0)
19744
19745 #define __put_user_asm_ex(x, addr, itype, rtype, ltype) \
19746- asm volatile("1: mov"itype" %"rtype"0,%1\n" \
19747+ asm volatile("1: "__copyuser_seg"mov"itype" %"rtype"0,%1\n"\
19748 "2:\n" \
19749 _ASM_EXTABLE_EX(1b, 2b) \
19750 : : ltype(x), "m" (__m(addr)))
19751@@ -442,11 +509,13 @@ struct __large_struct { unsigned long buf[100]; };
19752 */
19753 #define uaccess_try do { \
19754 current_thread_info()->uaccess_err = 0; \
19755+ pax_open_userland(); \
19756 stac(); \
19757 barrier();
19758
19759 #define uaccess_catch(err) \
19760 clac(); \
19761+ pax_close_userland(); \
19762 (err) |= (current_thread_info()->uaccess_err ? -EFAULT : 0); \
19763 } while (0)
19764
19765@@ -471,8 +540,12 @@ struct __large_struct { unsigned long buf[100]; };
19766 * On error, the variable @x is set to zero.
19767 */
19768
19769+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19770+#define __get_user(x, ptr) get_user((x), (ptr))
19771+#else
19772 #define __get_user(x, ptr) \
19773 __get_user_nocheck((x), (ptr), sizeof(*(ptr)))
19774+#endif
19775
19776 /**
19777 * __put_user: - Write a simple value into user space, with less checking.
19778@@ -494,8 +567,12 @@ struct __large_struct { unsigned long buf[100]; };
19779 * Returns zero on success, or -EFAULT on error.
19780 */
19781
19782+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19783+#define __put_user(x, ptr) put_user((x), (ptr))
19784+#else
19785 #define __put_user(x, ptr) \
19786 __put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
19787+#endif
19788
19789 #define __get_user_unaligned __get_user
19790 #define __put_user_unaligned __put_user
19791@@ -513,7 +590,7 @@ struct __large_struct { unsigned long buf[100]; };
19792 #define get_user_ex(x, ptr) do { \
19793 unsigned long __gue_val; \
19794 __get_user_size_ex((__gue_val), (ptr), (sizeof(*(ptr)))); \
19795- (x) = (__force __typeof__(*(ptr)))__gue_val; \
19796+ (x) = (__typeof__(*(ptr)))__gue_val; \
19797 } while (0)
19798
19799 #define put_user_try uaccess_try
19800@@ -531,7 +608,7 @@ extern __must_check long strlen_user(const char __user *str);
19801 extern __must_check long strnlen_user(const char __user *str, long n);
19802
19803 unsigned long __must_check clear_user(void __user *mem, unsigned long len);
19804-unsigned long __must_check __clear_user(void __user *mem, unsigned long len);
19805+unsigned long __must_check __clear_user(void __user *mem, unsigned long len) __size_overflow(2);
19806
19807 extern void __cmpxchg_wrong_size(void)
19808 __compiletime_error("Bad argument size for cmpxchg");
19809@@ -542,18 +619,19 @@ extern void __cmpxchg_wrong_size(void)
19810 __typeof__(ptr) __uval = (uval); \
19811 __typeof__(*(ptr)) __old = (old); \
19812 __typeof__(*(ptr)) __new = (new); \
19813+ pax_open_userland(); \
19814 switch (size) { \
19815 case 1: \
19816 { \
19817 asm volatile("\t" ASM_STAC "\n" \
19818- "1:\t" LOCK_PREFIX "cmpxchgb %4, %2\n" \
19819+ "1:\t" LOCK_PREFIX __copyuser_seg"cmpxchgb %4, %2\n"\
19820 "2:\t" ASM_CLAC "\n" \
19821 "\t.section .fixup, \"ax\"\n" \
19822 "3:\tmov %3, %0\n" \
19823 "\tjmp 2b\n" \
19824 "\t.previous\n" \
19825 _ASM_EXTABLE(1b, 3b) \
19826- : "+r" (__ret), "=a" (__old), "+m" (*(ptr)) \
19827+ : "+r" (__ret), "=a" (__old), "+m" (*____m(ptr))\
19828 : "i" (-EFAULT), "q" (__new), "1" (__old) \
19829 : "memory" \
19830 ); \
19831@@ -562,14 +640,14 @@ extern void __cmpxchg_wrong_size(void)
19832 case 2: \
19833 { \
19834 asm volatile("\t" ASM_STAC "\n" \
19835- "1:\t" LOCK_PREFIX "cmpxchgw %4, %2\n" \
19836+ "1:\t" LOCK_PREFIX __copyuser_seg"cmpxchgw %4, %2\n"\
19837 "2:\t" ASM_CLAC "\n" \
19838 "\t.section .fixup, \"ax\"\n" \
19839 "3:\tmov %3, %0\n" \
19840 "\tjmp 2b\n" \
19841 "\t.previous\n" \
19842 _ASM_EXTABLE(1b, 3b) \
19843- : "+r" (__ret), "=a" (__old), "+m" (*(ptr)) \
19844+ : "+r" (__ret), "=a" (__old), "+m" (*____m(ptr))\
19845 : "i" (-EFAULT), "r" (__new), "1" (__old) \
19846 : "memory" \
19847 ); \
19848@@ -578,14 +656,14 @@ extern void __cmpxchg_wrong_size(void)
19849 case 4: \
19850 { \
19851 asm volatile("\t" ASM_STAC "\n" \
19852- "1:\t" LOCK_PREFIX "cmpxchgl %4, %2\n" \
19853+ "1:\t" LOCK_PREFIX __copyuser_seg"cmpxchgl %4, %2\n"\
19854 "2:\t" ASM_CLAC "\n" \
19855 "\t.section .fixup, \"ax\"\n" \
19856 "3:\tmov %3, %0\n" \
19857 "\tjmp 2b\n" \
19858 "\t.previous\n" \
19859 _ASM_EXTABLE(1b, 3b) \
19860- : "+r" (__ret), "=a" (__old), "+m" (*(ptr)) \
19861+ : "+r" (__ret), "=a" (__old), "+m" (*____m(ptr))\
19862 : "i" (-EFAULT), "r" (__new), "1" (__old) \
19863 : "memory" \
19864 ); \
19865@@ -597,14 +675,14 @@ extern void __cmpxchg_wrong_size(void)
19866 __cmpxchg_wrong_size(); \
19867 \
19868 asm volatile("\t" ASM_STAC "\n" \
19869- "1:\t" LOCK_PREFIX "cmpxchgq %4, %2\n" \
19870+ "1:\t" LOCK_PREFIX __copyuser_seg"cmpxchgq %4, %2\n"\
19871 "2:\t" ASM_CLAC "\n" \
19872 "\t.section .fixup, \"ax\"\n" \
19873 "3:\tmov %3, %0\n" \
19874 "\tjmp 2b\n" \
19875 "\t.previous\n" \
19876 _ASM_EXTABLE(1b, 3b) \
19877- : "+r" (__ret), "=a" (__old), "+m" (*(ptr)) \
19878+ : "+r" (__ret), "=a" (__old), "+m" (*____m(ptr))\
19879 : "i" (-EFAULT), "r" (__new), "1" (__old) \
19880 : "memory" \
19881 ); \
19882@@ -613,6 +691,7 @@ extern void __cmpxchg_wrong_size(void)
19883 default: \
19884 __cmpxchg_wrong_size(); \
19885 } \
19886+ pax_close_userland(); \
19887 *__uval = __old; \
19888 __ret; \
19889 })
19890@@ -636,17 +715,6 @@ extern struct movsl_mask {
19891
19892 #define ARCH_HAS_NOCACHE_UACCESS 1
19893
19894-#ifdef CONFIG_X86_32
19895-# include <asm/uaccess_32.h>
19896-#else
19897-# include <asm/uaccess_64.h>
19898-#endif
19899-
19900-unsigned long __must_check _copy_from_user(void *to, const void __user *from,
19901- unsigned n);
19902-unsigned long __must_check _copy_to_user(void __user *to, const void *from,
19903- unsigned n);
19904-
19905 #ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
19906 # define copy_user_diag __compiletime_error
19907 #else
19908@@ -656,7 +724,7 @@ unsigned long __must_check _copy_to_user(void __user *to, const void *from,
19909 extern void copy_user_diag("copy_from_user() buffer size is too small")
19910 copy_from_user_overflow(void);
19911 extern void copy_user_diag("copy_to_user() buffer size is too small")
19912-copy_to_user_overflow(void) __asm__("copy_from_user_overflow");
19913+copy_to_user_overflow(void);
19914
19915 #undef copy_user_diag
19916
19917@@ -669,7 +737,7 @@ __copy_from_user_overflow(void) __asm__("copy_from_user_overflow");
19918
19919 extern void
19920 __compiletime_warning("copy_to_user() buffer size is not provably correct")
19921-__copy_to_user_overflow(void) __asm__("copy_from_user_overflow");
19922+__copy_to_user_overflow(void) __asm__("copy_to_user_overflow");
19923 #define __copy_to_user_overflow(size, count) __copy_to_user_overflow()
19924
19925 #else
19926@@ -684,10 +752,16 @@ __copy_from_user_overflow(int size, unsigned long count)
19927
19928 #endif
19929
19930+#ifdef CONFIG_X86_32
19931+# include <asm/uaccess_32.h>
19932+#else
19933+# include <asm/uaccess_64.h>
19934+#endif
19935+
19936 static inline unsigned long __must_check
19937 copy_from_user(void *to, const void __user *from, unsigned long n)
19938 {
19939- int sz = __compiletime_object_size(to);
19940+ size_t sz = __compiletime_object_size(to);
19941
19942 might_fault();
19943
19944@@ -709,12 +783,15 @@ copy_from_user(void *to, const void __user *from, unsigned long n)
19945 * case, and do only runtime checking for non-constant sizes.
19946 */
19947
19948- if (likely(sz < 0 || sz >= n))
19949- n = _copy_from_user(to, from, n);
19950- else if(__builtin_constant_p(n))
19951- copy_from_user_overflow();
19952- else
19953- __copy_from_user_overflow(sz, n);
19954+ if (likely(sz != (size_t)-1 && sz < n)) {
19955+ if(__builtin_constant_p(n))
19956+ copy_from_user_overflow();
19957+ else
19958+ __copy_from_user_overflow(sz, n);
19959+ } else if (access_ok(VERIFY_READ, from, n))
19960+ n = __copy_from_user(to, from, n);
19961+ else if ((long)n > 0)
19962+ memset(to, 0, n);
19963
19964 return n;
19965 }
19966@@ -722,17 +799,18 @@ copy_from_user(void *to, const void __user *from, unsigned long n)
19967 static inline unsigned long __must_check
19968 copy_to_user(void __user *to, const void *from, unsigned long n)
19969 {
19970- int sz = __compiletime_object_size(from);
19971+ size_t sz = __compiletime_object_size(from);
19972
19973 might_fault();
19974
19975 /* See the comment in copy_from_user() above. */
19976- if (likely(sz < 0 || sz >= n))
19977- n = _copy_to_user(to, from, n);
19978- else if(__builtin_constant_p(n))
19979- copy_to_user_overflow();
19980- else
19981- __copy_to_user_overflow(sz, n);
19982+ if (likely(sz != (size_t)-1 && sz < n)) {
19983+ if(__builtin_constant_p(n))
19984+ copy_to_user_overflow();
19985+ else
19986+ __copy_to_user_overflow(sz, n);
19987+ } else if (access_ok(VERIFY_WRITE, to, n))
19988+ n = __copy_to_user(to, from, n);
19989
19990 return n;
19991 }
19992diff --git a/arch/x86/include/asm/uaccess_32.h b/arch/x86/include/asm/uaccess_32.h
19993index 3c03a5d..edb68ae 100644
19994--- a/arch/x86/include/asm/uaccess_32.h
19995+++ b/arch/x86/include/asm/uaccess_32.h
19996@@ -40,9 +40,14 @@ unsigned long __must_check __copy_from_user_ll_nocache_nozero
19997 * anything, so this is accurate.
19998 */
19999
20000-static __always_inline unsigned long __must_check
20001+static __always_inline __size_overflow(3) unsigned long __must_check
20002 __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
20003 {
20004+ if ((long)n < 0)
20005+ return n;
20006+
20007+ check_object_size(from, n, true);
20008+
20009 if (__builtin_constant_p(n)) {
20010 unsigned long ret;
20011
20012@@ -82,12 +87,16 @@ static __always_inline unsigned long __must_check
20013 __copy_to_user(void __user *to, const void *from, unsigned long n)
20014 {
20015 might_fault();
20016+
20017 return __copy_to_user_inatomic(to, from, n);
20018 }
20019
20020-static __always_inline unsigned long
20021+static __always_inline __size_overflow(3) unsigned long
20022 __copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
20023 {
20024+ if ((long)n < 0)
20025+ return n;
20026+
20027 /* Avoid zeroing the tail if the copy fails..
20028 * If 'n' is constant and 1, 2, or 4, we do still zero on a failure,
20029 * but as the zeroing behaviour is only significant when n is not
20030@@ -137,6 +146,12 @@ static __always_inline unsigned long
20031 __copy_from_user(void *to, const void __user *from, unsigned long n)
20032 {
20033 might_fault();
20034+
20035+ if ((long)n < 0)
20036+ return n;
20037+
20038+ check_object_size(to, n, false);
20039+
20040 if (__builtin_constant_p(n)) {
20041 unsigned long ret;
20042
20043@@ -159,6 +174,10 @@ static __always_inline unsigned long __copy_from_user_nocache(void *to,
20044 const void __user *from, unsigned long n)
20045 {
20046 might_fault();
20047+
20048+ if ((long)n < 0)
20049+ return n;
20050+
20051 if (__builtin_constant_p(n)) {
20052 unsigned long ret;
20053
20054@@ -181,7 +200,10 @@ static __always_inline unsigned long
20055 __copy_from_user_inatomic_nocache(void *to, const void __user *from,
20056 unsigned long n)
20057 {
20058- return __copy_from_user_ll_nocache_nozero(to, from, n);
20059+ if ((long)n < 0)
20060+ return n;
20061+
20062+ return __copy_from_user_ll_nocache_nozero(to, from, n);
20063 }
20064
20065 #endif /* _ASM_X86_UACCESS_32_H */
20066diff --git a/arch/x86/include/asm/uaccess_64.h b/arch/x86/include/asm/uaccess_64.h
20067index f2f9b39..2ae1bf8 100644
20068--- a/arch/x86/include/asm/uaccess_64.h
20069+++ b/arch/x86/include/asm/uaccess_64.h
20070@@ -10,6 +10,9 @@
20071 #include <asm/alternative.h>
20072 #include <asm/cpufeature.h>
20073 #include <asm/page.h>
20074+#include <asm/pgtable.h>
20075+
20076+#define set_fs(x) (current_thread_info()->addr_limit = (x))
20077
20078 /*
20079 * Copy To/From Userspace
20080@@ -23,8 +26,8 @@ copy_user_generic_string(void *to, const void *from, unsigned len);
20081 __must_check unsigned long
20082 copy_user_generic_unrolled(void *to, const void *from, unsigned len);
20083
20084-static __always_inline __must_check unsigned long
20085-copy_user_generic(void *to, const void *from, unsigned len)
20086+static __always_inline __must_check __size_overflow(3) unsigned long
20087+copy_user_generic(void *to, const void *from, unsigned long len)
20088 {
20089 unsigned ret;
20090
20091@@ -46,121 +49,170 @@ copy_user_generic(void *to, const void *from, unsigned len)
20092 }
20093
20094 __must_check unsigned long
20095-copy_in_user(void __user *to, const void __user *from, unsigned len);
20096+copy_in_user(void __user *to, const void __user *from, unsigned long len);
20097
20098 static __always_inline __must_check
20099-int __copy_from_user_nocheck(void *dst, const void __user *src, unsigned size)
20100+unsigned long __copy_from_user_nocheck(void *dst, const void __user *src, unsigned long size)
20101 {
20102- int ret = 0;
20103+ size_t sz = __compiletime_object_size(dst);
20104+ unsigned ret = 0;
20105+
20106+ if (size > INT_MAX)
20107+ return size;
20108+
20109+ check_object_size(dst, size, false);
20110+
20111+#ifdef CONFIG_PAX_MEMORY_UDEREF
20112+ if (!access_ok_noprefault(VERIFY_READ, src, size))
20113+ return size;
20114+#endif
20115+
20116+ if (unlikely(sz != (size_t)-1 && sz < size)) {
20117+ if(__builtin_constant_p(size))
20118+ copy_from_user_overflow();
20119+ else
20120+ __copy_from_user_overflow(sz, size);
20121+ return size;
20122+ }
20123
20124 if (!__builtin_constant_p(size))
20125- return copy_user_generic(dst, (__force void *)src, size);
20126+ return copy_user_generic(dst, (__force_kernel const void *)____m(src), size);
20127 switch (size) {
20128- case 1:__get_user_asm(*(u8 *)dst, (u8 __user *)src,
20129+ case 1:__get_user_asm(*(u8 *)dst, (const u8 __user *)src,
20130 ret, "b", "b", "=q", 1);
20131 return ret;
20132- case 2:__get_user_asm(*(u16 *)dst, (u16 __user *)src,
20133+ case 2:__get_user_asm(*(u16 *)dst, (const u16 __user *)src,
20134 ret, "w", "w", "=r", 2);
20135 return ret;
20136- case 4:__get_user_asm(*(u32 *)dst, (u32 __user *)src,
20137+ case 4:__get_user_asm(*(u32 *)dst, (const u32 __user *)src,
20138 ret, "l", "k", "=r", 4);
20139 return ret;
20140- case 8:__get_user_asm(*(u64 *)dst, (u64 __user *)src,
20141+ case 8:__get_user_asm(*(u64 *)dst, (const u64 __user *)src,
20142 ret, "q", "", "=r", 8);
20143 return ret;
20144 case 10:
20145- __get_user_asm(*(u64 *)dst, (u64 __user *)src,
20146+ __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
20147 ret, "q", "", "=r", 10);
20148 if (unlikely(ret))
20149 return ret;
20150 __get_user_asm(*(u16 *)(8 + (char *)dst),
20151- (u16 __user *)(8 + (char __user *)src),
20152+ (const u16 __user *)(8 + (const char __user *)src),
20153 ret, "w", "w", "=r", 2);
20154 return ret;
20155 case 16:
20156- __get_user_asm(*(u64 *)dst, (u64 __user *)src,
20157+ __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
20158 ret, "q", "", "=r", 16);
20159 if (unlikely(ret))
20160 return ret;
20161 __get_user_asm(*(u64 *)(8 + (char *)dst),
20162- (u64 __user *)(8 + (char __user *)src),
20163+ (const u64 __user *)(8 + (const char __user *)src),
20164 ret, "q", "", "=r", 8);
20165 return ret;
20166 default:
20167- return copy_user_generic(dst, (__force void *)src, size);
20168+ return copy_user_generic(dst, (__force_kernel const void *)____m(src), size);
20169 }
20170 }
20171
20172 static __always_inline __must_check
20173-int __copy_from_user(void *dst, const void __user *src, unsigned size)
20174+unsigned long __copy_from_user(void *dst, const void __user *src, unsigned long size)
20175 {
20176 might_fault();
20177 return __copy_from_user_nocheck(dst, src, size);
20178 }
20179
20180 static __always_inline __must_check
20181-int __copy_to_user_nocheck(void __user *dst, const void *src, unsigned size)
20182+unsigned long __copy_to_user_nocheck(void __user *dst, const void *src, unsigned long size)
20183 {
20184- int ret = 0;
20185+ size_t sz = __compiletime_object_size(src);
20186+ unsigned ret = 0;
20187+
20188+ if (size > INT_MAX)
20189+ return size;
20190+
20191+ check_object_size(src, size, true);
20192+
20193+#ifdef CONFIG_PAX_MEMORY_UDEREF
20194+ if (!access_ok_noprefault(VERIFY_WRITE, dst, size))
20195+ return size;
20196+#endif
20197+
20198+ if (unlikely(sz != (size_t)-1 && sz < size)) {
20199+ if(__builtin_constant_p(size))
20200+ copy_to_user_overflow();
20201+ else
20202+ __copy_to_user_overflow(sz, size);
20203+ return size;
20204+ }
20205
20206 if (!__builtin_constant_p(size))
20207- return copy_user_generic((__force void *)dst, src, size);
20208+ return copy_user_generic((__force_kernel void *)____m(dst), src, size);
20209 switch (size) {
20210- case 1:__put_user_asm(*(u8 *)src, (u8 __user *)dst,
20211+ case 1:__put_user_asm(*(const u8 *)src, (u8 __user *)dst,
20212 ret, "b", "b", "iq", 1);
20213 return ret;
20214- case 2:__put_user_asm(*(u16 *)src, (u16 __user *)dst,
20215+ case 2:__put_user_asm(*(const u16 *)src, (u16 __user *)dst,
20216 ret, "w", "w", "ir", 2);
20217 return ret;
20218- case 4:__put_user_asm(*(u32 *)src, (u32 __user *)dst,
20219+ case 4:__put_user_asm(*(const u32 *)src, (u32 __user *)dst,
20220 ret, "l", "k", "ir", 4);
20221 return ret;
20222- case 8:__put_user_asm(*(u64 *)src, (u64 __user *)dst,
20223+ case 8:__put_user_asm(*(const u64 *)src, (u64 __user *)dst,
20224 ret, "q", "", "er", 8);
20225 return ret;
20226 case 10:
20227- __put_user_asm(*(u64 *)src, (u64 __user *)dst,
20228+ __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
20229 ret, "q", "", "er", 10);
20230 if (unlikely(ret))
20231 return ret;
20232 asm("":::"memory");
20233- __put_user_asm(4[(u16 *)src], 4 + (u16 __user *)dst,
20234+ __put_user_asm(4[(const u16 *)src], 4 + (u16 __user *)dst,
20235 ret, "w", "w", "ir", 2);
20236 return ret;
20237 case 16:
20238- __put_user_asm(*(u64 *)src, (u64 __user *)dst,
20239+ __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
20240 ret, "q", "", "er", 16);
20241 if (unlikely(ret))
20242 return ret;
20243 asm("":::"memory");
20244- __put_user_asm(1[(u64 *)src], 1 + (u64 __user *)dst,
20245+ __put_user_asm(1[(const u64 *)src], 1 + (u64 __user *)dst,
20246 ret, "q", "", "er", 8);
20247 return ret;
20248 default:
20249- return copy_user_generic((__force void *)dst, src, size);
20250+ return copy_user_generic((__force_kernel void *)____m(dst), src, size);
20251 }
20252 }
20253
20254 static __always_inline __must_check
20255-int __copy_to_user(void __user *dst, const void *src, unsigned size)
20256+unsigned long __copy_to_user(void __user *dst, const void *src, unsigned long size)
20257 {
20258 might_fault();
20259 return __copy_to_user_nocheck(dst, src, size);
20260 }
20261
20262 static __always_inline __must_check
20263-int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
20264+unsigned long __copy_in_user(void __user *dst, const void __user *src, unsigned size)
20265 {
20266- int ret = 0;
20267+ unsigned ret = 0;
20268
20269 might_fault();
20270+
20271+ if (size > INT_MAX)
20272+ return size;
20273+
20274+#ifdef CONFIG_PAX_MEMORY_UDEREF
20275+ if (!access_ok_noprefault(VERIFY_READ, src, size))
20276+ return size;
20277+ if (!access_ok_noprefault(VERIFY_WRITE, dst, size))
20278+ return size;
20279+#endif
20280+
20281 if (!__builtin_constant_p(size))
20282- return copy_user_generic((__force void *)dst,
20283- (__force void *)src, size);
20284+ return copy_user_generic((__force_kernel void *)____m(dst),
20285+ (__force_kernel const void *)____m(src), size);
20286 switch (size) {
20287 case 1: {
20288 u8 tmp;
20289- __get_user_asm(tmp, (u8 __user *)src,
20290+ __get_user_asm(tmp, (const u8 __user *)src,
20291 ret, "b", "b", "=q", 1);
20292 if (likely(!ret))
20293 __put_user_asm(tmp, (u8 __user *)dst,
20294@@ -169,7 +221,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
20295 }
20296 case 2: {
20297 u16 tmp;
20298- __get_user_asm(tmp, (u16 __user *)src,
20299+ __get_user_asm(tmp, (const u16 __user *)src,
20300 ret, "w", "w", "=r", 2);
20301 if (likely(!ret))
20302 __put_user_asm(tmp, (u16 __user *)dst,
20303@@ -179,7 +231,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
20304
20305 case 4: {
20306 u32 tmp;
20307- __get_user_asm(tmp, (u32 __user *)src,
20308+ __get_user_asm(tmp, (const u32 __user *)src,
20309 ret, "l", "k", "=r", 4);
20310 if (likely(!ret))
20311 __put_user_asm(tmp, (u32 __user *)dst,
20312@@ -188,7 +240,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
20313 }
20314 case 8: {
20315 u64 tmp;
20316- __get_user_asm(tmp, (u64 __user *)src,
20317+ __get_user_asm(tmp, (const u64 __user *)src,
20318 ret, "q", "", "=r", 8);
20319 if (likely(!ret))
20320 __put_user_asm(tmp, (u64 __user *)dst,
20321@@ -196,41 +248,58 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
20322 return ret;
20323 }
20324 default:
20325- return copy_user_generic((__force void *)dst,
20326- (__force void *)src, size);
20327+ return copy_user_generic((__force_kernel void *)____m(dst),
20328+ (__force_kernel const void *)____m(src), size);
20329 }
20330 }
20331
20332-static __must_check __always_inline int
20333-__copy_from_user_inatomic(void *dst, const void __user *src, unsigned size)
20334+static __must_check __always_inline unsigned long
20335+__copy_from_user_inatomic(void *dst, const void __user *src, unsigned long size)
20336 {
20337 return __copy_from_user_nocheck(dst, src, size);
20338 }
20339
20340-static __must_check __always_inline int
20341-__copy_to_user_inatomic(void __user *dst, const void *src, unsigned size)
20342+static __must_check __always_inline unsigned long
20343+__copy_to_user_inatomic(void __user *dst, const void *src, unsigned long size)
20344 {
20345 return __copy_to_user_nocheck(dst, src, size);
20346 }
20347
20348-extern long __copy_user_nocache(void *dst, const void __user *src,
20349- unsigned size, int zerorest);
20350+extern unsigned long __copy_user_nocache(void *dst, const void __user *src,
20351+ unsigned long size, int zerorest);
20352
20353-static inline int
20354-__copy_from_user_nocache(void *dst, const void __user *src, unsigned size)
20355+static inline unsigned long
20356+__copy_from_user_nocache(void *dst, const void __user *src, unsigned long size)
20357 {
20358 might_fault();
20359+
20360+ if (size > INT_MAX)
20361+ return size;
20362+
20363+#ifdef CONFIG_PAX_MEMORY_UDEREF
20364+ if (!access_ok_noprefault(VERIFY_READ, src, size))
20365+ return size;
20366+#endif
20367+
20368 return __copy_user_nocache(dst, src, size, 1);
20369 }
20370
20371-static inline int
20372+static inline unsigned long
20373 __copy_from_user_inatomic_nocache(void *dst, const void __user *src,
20374- unsigned size)
20375+ unsigned long size)
20376 {
20377+ if (size > INT_MAX)
20378+ return size;
20379+
20380+#ifdef CONFIG_PAX_MEMORY_UDEREF
20381+ if (!access_ok_noprefault(VERIFY_READ, src, size))
20382+ return size;
20383+#endif
20384+
20385 return __copy_user_nocache(dst, src, size, 0);
20386 }
20387
20388 unsigned long
20389-copy_user_handle_tail(char *to, char *from, unsigned len);
20390+copy_user_handle_tail(char __user *to, char __user *from, unsigned long len) __size_overflow(3);
20391
20392 #endif /* _ASM_X86_UACCESS_64_H */
20393diff --git a/arch/x86/include/asm/word-at-a-time.h b/arch/x86/include/asm/word-at-a-time.h
20394index 5b238981..77fdd78 100644
20395--- a/arch/x86/include/asm/word-at-a-time.h
20396+++ b/arch/x86/include/asm/word-at-a-time.h
20397@@ -11,7 +11,7 @@
20398 * and shift, for example.
20399 */
20400 struct word_at_a_time {
20401- const unsigned long one_bits, high_bits;
20402+ unsigned long one_bits, high_bits;
20403 };
20404
20405 #define WORD_AT_A_TIME_CONSTANTS { REPEAT_BYTE(0x01), REPEAT_BYTE(0x80) }
20406diff --git a/arch/x86/include/asm/x86_init.h b/arch/x86/include/asm/x86_init.h
20407index f58a9c7..dc378042a 100644
20408--- a/arch/x86/include/asm/x86_init.h
20409+++ b/arch/x86/include/asm/x86_init.h
20410@@ -129,7 +129,7 @@ struct x86_init_ops {
20411 struct x86_init_timers timers;
20412 struct x86_init_iommu iommu;
20413 struct x86_init_pci pci;
20414-};
20415+} __no_const;
20416
20417 /**
20418 * struct x86_cpuinit_ops - platform specific cpu hotplug setups
20419@@ -140,7 +140,7 @@ struct x86_cpuinit_ops {
20420 void (*setup_percpu_clockev)(void);
20421 void (*early_percpu_clock_init)(void);
20422 void (*fixup_cpu_id)(struct cpuinfo_x86 *c, int node);
20423-};
20424+} __no_const;
20425
20426 struct timespec;
20427
20428@@ -168,7 +168,7 @@ struct x86_platform_ops {
20429 void (*save_sched_clock_state)(void);
20430 void (*restore_sched_clock_state)(void);
20431 void (*apic_post_init)(void);
20432-};
20433+} __no_const;
20434
20435 struct pci_dev;
20436 struct msi_msg;
20437@@ -182,7 +182,7 @@ struct x86_msi_ops {
20438 void (*teardown_msi_irqs)(struct pci_dev *dev);
20439 void (*restore_msi_irqs)(struct pci_dev *dev);
20440 int (*setup_hpet_msi)(unsigned int irq, unsigned int id);
20441-};
20442+} __no_const;
20443
20444 struct IO_APIC_route_entry;
20445 struct io_apic_irq_attr;
20446@@ -203,7 +203,7 @@ struct x86_io_apic_ops {
20447 unsigned int destination, int vector,
20448 struct io_apic_irq_attr *attr);
20449 void (*eoi_ioapic_pin)(int apic, int pin, int vector);
20450-};
20451+} __no_const;
20452
20453 extern struct x86_init_ops x86_init;
20454 extern struct x86_cpuinit_ops x86_cpuinit;
20455diff --git a/arch/x86/include/asm/xen/page.h b/arch/x86/include/asm/xen/page.h
20456index 358dcd3..23c0bf1 100644
20457--- a/arch/x86/include/asm/xen/page.h
20458+++ b/arch/x86/include/asm/xen/page.h
20459@@ -82,7 +82,7 @@ static inline int xen_safe_read_ulong(unsigned long *addr, unsigned long *val)
20460 * - get_phys_to_machine() is to be called by __pfn_to_mfn() only in special
20461 * cases needing an extended handling.
20462 */
20463-static inline unsigned long __pfn_to_mfn(unsigned long pfn)
20464+static inline unsigned long __intentional_overflow(-1) __pfn_to_mfn(unsigned long pfn)
20465 {
20466 unsigned long mfn;
20467
20468diff --git a/arch/x86/include/asm/xsave.h b/arch/x86/include/asm/xsave.h
20469index c9a6d68..cb57f42 100644
20470--- a/arch/x86/include/asm/xsave.h
20471+++ b/arch/x86/include/asm/xsave.h
20472@@ -223,12 +223,16 @@ static inline int xsave_user(struct xsave_struct __user *buf)
20473 if (unlikely(err))
20474 return -EFAULT;
20475
20476+ pax_open_userland();
20477 __asm__ __volatile__(ASM_STAC "\n"
20478- "1:"XSAVE"\n"
20479+ "1:"
20480+ __copyuser_seg
20481+ XSAVE"\n"
20482 "2: " ASM_CLAC "\n"
20483 xstate_fault
20484 : "D" (buf), "a" (-1), "d" (-1), "0" (0)
20485 : "memory");
20486+ pax_close_userland();
20487 return err;
20488 }
20489
20490@@ -238,16 +242,20 @@ static inline int xsave_user(struct xsave_struct __user *buf)
20491 static inline int xrestore_user(struct xsave_struct __user *buf, u64 mask)
20492 {
20493 int err = 0;
20494- struct xsave_struct *xstate = ((__force struct xsave_struct *)buf);
20495+ struct xsave_struct *xstate = ((__force_kernel struct xsave_struct *)buf);
20496 u32 lmask = mask;
20497 u32 hmask = mask >> 32;
20498
20499+ pax_open_userland();
20500 __asm__ __volatile__(ASM_STAC "\n"
20501- "1:"XRSTOR"\n"
20502+ "1:"
20503+ __copyuser_seg
20504+ XRSTOR"\n"
20505 "2: " ASM_CLAC "\n"
20506 xstate_fault
20507 : "D" (xstate), "a" (lmask), "d" (hmask), "0" (0)
20508 : "memory"); /* memory required? */
20509+ pax_close_userland();
20510 return err;
20511 }
20512
20513diff --git a/arch/x86/include/uapi/asm/e820.h b/arch/x86/include/uapi/asm/e820.h
20514index d993e33..8db1b18 100644
20515--- a/arch/x86/include/uapi/asm/e820.h
20516+++ b/arch/x86/include/uapi/asm/e820.h
20517@@ -58,7 +58,7 @@ struct e820map {
20518 #define ISA_START_ADDRESS 0xa0000
20519 #define ISA_END_ADDRESS 0x100000
20520
20521-#define BIOS_BEGIN 0x000a0000
20522+#define BIOS_BEGIN 0x000c0000
20523 #define BIOS_END 0x00100000
20524
20525 #define BIOS_ROM_BASE 0xffe00000
20526diff --git a/arch/x86/include/uapi/asm/ptrace-abi.h b/arch/x86/include/uapi/asm/ptrace-abi.h
20527index 7b0a55a..ad115bf 100644
20528--- a/arch/x86/include/uapi/asm/ptrace-abi.h
20529+++ b/arch/x86/include/uapi/asm/ptrace-abi.h
20530@@ -49,7 +49,6 @@
20531 #define EFLAGS 144
20532 #define RSP 152
20533 #define SS 160
20534-#define ARGOFFSET R11
20535 #endif /* __ASSEMBLY__ */
20536
20537 /* top of stack page */
20538diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile
20539index cdb1b70..426434c 100644
20540--- a/arch/x86/kernel/Makefile
20541+++ b/arch/x86/kernel/Makefile
20542@@ -28,7 +28,7 @@ obj-y += time.o ioport.o ldt.o dumpstack.o nmi.o
20543 obj-y += setup.o x86_init.o i8259.o irqinit.o jump_label.o
20544 obj-$(CONFIG_IRQ_WORK) += irq_work.o
20545 obj-y += probe_roms.o
20546-obj-$(CONFIG_X86_32) += i386_ksyms_32.o
20547+obj-$(CONFIG_X86_32) += sys_i386_32.o i386_ksyms_32.o
20548 obj-$(CONFIG_X86_64) += sys_x86_64.o x8664_ksyms_64.o
20549 obj-$(CONFIG_X86_64) += mcount_64.o
20550 obj-y += syscall_$(BITS).o vsyscall_gtod.o
20551diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
20552index 803b684..68c64f1 100644
20553--- a/arch/x86/kernel/acpi/boot.c
20554+++ b/arch/x86/kernel/acpi/boot.c
20555@@ -1361,7 +1361,7 @@ static void __init acpi_reduced_hw_init(void)
20556 * If your system is blacklisted here, but you find that acpi=force
20557 * works for you, please contact linux-acpi@vger.kernel.org
20558 */
20559-static struct dmi_system_id __initdata acpi_dmi_table[] = {
20560+static const struct dmi_system_id __initconst acpi_dmi_table[] = {
20561 /*
20562 * Boxes that need ACPI disabled
20563 */
20564@@ -1436,7 +1436,7 @@ static struct dmi_system_id __initdata acpi_dmi_table[] = {
20565 };
20566
20567 /* second table for DMI checks that should run after early-quirks */
20568-static struct dmi_system_id __initdata acpi_dmi_table_late[] = {
20569+static const struct dmi_system_id __initconst acpi_dmi_table_late[] = {
20570 /*
20571 * HP laptops which use a DSDT reporting as HP/SB400/10000,
20572 * which includes some code which overrides all temperature
20573diff --git a/arch/x86/kernel/acpi/sleep.c b/arch/x86/kernel/acpi/sleep.c
20574index d1daead..acd77e2 100644
20575--- a/arch/x86/kernel/acpi/sleep.c
20576+++ b/arch/x86/kernel/acpi/sleep.c
20577@@ -99,8 +99,12 @@ int x86_acpi_suspend_lowlevel(void)
20578 #else /* CONFIG_64BIT */
20579 #ifdef CONFIG_SMP
20580 stack_start = (unsigned long)temp_stack + sizeof(temp_stack);
20581+
20582+ pax_open_kernel();
20583 early_gdt_descr.address =
20584 (unsigned long)get_cpu_gdt_table(smp_processor_id());
20585+ pax_close_kernel();
20586+
20587 initial_gs = per_cpu_offset(smp_processor_id());
20588 #endif
20589 initial_code = (unsigned long)wakeup_long64;
20590diff --git a/arch/x86/kernel/acpi/wakeup_32.S b/arch/x86/kernel/acpi/wakeup_32.S
20591index 665c6b7..eae4d56 100644
20592--- a/arch/x86/kernel/acpi/wakeup_32.S
20593+++ b/arch/x86/kernel/acpi/wakeup_32.S
20594@@ -29,13 +29,11 @@ wakeup_pmode_return:
20595 # and restore the stack ... but you need gdt for this to work
20596 movl saved_context_esp, %esp
20597
20598- movl %cs:saved_magic, %eax
20599- cmpl $0x12345678, %eax
20600+ cmpl $0x12345678, saved_magic
20601 jne bogus_magic
20602
20603 # jump to place where we left off
20604- movl saved_eip, %eax
20605- jmp *%eax
20606+ jmp *(saved_eip)
20607
20608 bogus_magic:
20609 jmp bogus_magic
20610diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
20611index 703130f..27a155d 100644
20612--- a/arch/x86/kernel/alternative.c
20613+++ b/arch/x86/kernel/alternative.c
20614@@ -268,6 +268,13 @@ void __init_or_module apply_alternatives(struct alt_instr *start,
20615 */
20616 for (a = start; a < end; a++) {
20617 instr = (u8 *)&a->instr_offset + a->instr_offset;
20618+
20619+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
20620+ instr += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
20621+ if (instr < (u8 *)_text || (u8 *)_einittext <= instr)
20622+ instr -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
20623+#endif
20624+
20625 replacement = (u8 *)&a->repl_offset + a->repl_offset;
20626 BUG_ON(a->replacementlen > a->instrlen);
20627 BUG_ON(a->instrlen > sizeof(insnbuf));
20628@@ -284,6 +291,11 @@ void __init_or_module apply_alternatives(struct alt_instr *start,
20629 add_nops(insnbuf + a->replacementlen,
20630 a->instrlen - a->replacementlen);
20631
20632+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
20633+ if (instr < (u8 *)_text || (u8 *)_einittext <= instr)
20634+ instr = ktva_ktla(instr);
20635+#endif
20636+
20637 text_poke_early(instr, insnbuf, a->instrlen);
20638 }
20639 }
20640@@ -299,10 +311,16 @@ static void alternatives_smp_lock(const s32 *start, const s32 *end,
20641 for (poff = start; poff < end; poff++) {
20642 u8 *ptr = (u8 *)poff + *poff;
20643
20644+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
20645+ ptr += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
20646+ if (ptr < (u8 *)_text || (u8 *)_einittext <= ptr)
20647+ ptr -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
20648+#endif
20649+
20650 if (!*poff || ptr < text || ptr >= text_end)
20651 continue;
20652 /* turn DS segment override prefix into lock prefix */
20653- if (*ptr == 0x3e)
20654+ if (*ktla_ktva(ptr) == 0x3e)
20655 text_poke(ptr, ((unsigned char []){0xf0}), 1);
20656 }
20657 mutex_unlock(&text_mutex);
20658@@ -317,10 +335,16 @@ static void alternatives_smp_unlock(const s32 *start, const s32 *end,
20659 for (poff = start; poff < end; poff++) {
20660 u8 *ptr = (u8 *)poff + *poff;
20661
20662+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
20663+ ptr += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
20664+ if (ptr < (u8 *)_text || (u8 *)_einittext <= ptr)
20665+ ptr -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
20666+#endif
20667+
20668 if (!*poff || ptr < text || ptr >= text_end)
20669 continue;
20670 /* turn lock prefix into DS segment override prefix */
20671- if (*ptr == 0xf0)
20672+ if (*ktla_ktva(ptr) == 0xf0)
20673 text_poke(ptr, ((unsigned char []){0x3E}), 1);
20674 }
20675 mutex_unlock(&text_mutex);
20676@@ -457,7 +481,7 @@ void __init_or_module apply_paravirt(struct paravirt_patch_site *start,
20677
20678 BUG_ON(p->len > MAX_PATCH_LEN);
20679 /* prep the buffer with the original instructions */
20680- memcpy(insnbuf, p->instr, p->len);
20681+ memcpy(insnbuf, ktla_ktva(p->instr), p->len);
20682 used = pv_init_ops.patch(p->instrtype, p->clobbers, insnbuf,
20683 (unsigned long)p->instr, p->len);
20684
20685@@ -504,7 +528,7 @@ void __init alternative_instructions(void)
20686 if (!uniproc_patched || num_possible_cpus() == 1)
20687 free_init_pages("SMP alternatives",
20688 (unsigned long)__smp_locks,
20689- (unsigned long)__smp_locks_end);
20690+ PAGE_ALIGN((unsigned long)__smp_locks_end));
20691 #endif
20692
20693 apply_paravirt(__parainstructions, __parainstructions_end);
20694@@ -524,13 +548,17 @@ void __init alternative_instructions(void)
20695 * instructions. And on the local CPU you need to be protected again NMI or MCE
20696 * handlers seeing an inconsistent instruction while you patch.
20697 */
20698-void *__init_or_module text_poke_early(void *addr, const void *opcode,
20699+void *__kprobes text_poke_early(void *addr, const void *opcode,
20700 size_t len)
20701 {
20702 unsigned long flags;
20703 local_irq_save(flags);
20704- memcpy(addr, opcode, len);
20705+
20706+ pax_open_kernel();
20707+ memcpy(ktla_ktva(addr), opcode, len);
20708 sync_core();
20709+ pax_close_kernel();
20710+
20711 local_irq_restore(flags);
20712 /* Could also do a CLFLUSH here to speed up CPU recovery; but
20713 that causes hangs on some VIA CPUs. */
20714@@ -552,36 +580,22 @@ void *__init_or_module text_poke_early(void *addr, const void *opcode,
20715 */
20716 void *text_poke(void *addr, const void *opcode, size_t len)
20717 {
20718- unsigned long flags;
20719- char *vaddr;
20720+ unsigned char *vaddr = ktla_ktva(addr);
20721 struct page *pages[2];
20722- int i;
20723+ size_t i;
20724
20725 if (!core_kernel_text((unsigned long)addr)) {
20726- pages[0] = vmalloc_to_page(addr);
20727- pages[1] = vmalloc_to_page(addr + PAGE_SIZE);
20728+ pages[0] = vmalloc_to_page(vaddr);
20729+ pages[1] = vmalloc_to_page(vaddr + PAGE_SIZE);
20730 } else {
20731- pages[0] = virt_to_page(addr);
20732+ pages[0] = virt_to_page(vaddr);
20733 WARN_ON(!PageReserved(pages[0]));
20734- pages[1] = virt_to_page(addr + PAGE_SIZE);
20735+ pages[1] = virt_to_page(vaddr + PAGE_SIZE);
20736 }
20737 BUG_ON(!pages[0]);
20738- local_irq_save(flags);
20739- set_fixmap(FIX_TEXT_POKE0, page_to_phys(pages[0]));
20740- if (pages[1])
20741- set_fixmap(FIX_TEXT_POKE1, page_to_phys(pages[1]));
20742- vaddr = (char *)fix_to_virt(FIX_TEXT_POKE0);
20743- memcpy(&vaddr[(unsigned long)addr & ~PAGE_MASK], opcode, len);
20744- clear_fixmap(FIX_TEXT_POKE0);
20745- if (pages[1])
20746- clear_fixmap(FIX_TEXT_POKE1);
20747- local_flush_tlb();
20748- sync_core();
20749- /* Could also do a CLFLUSH here to speed up CPU recovery; but
20750- that causes hangs on some VIA CPUs. */
20751+ text_poke_early(addr, opcode, len);
20752 for (i = 0; i < len; i++)
20753- BUG_ON(((char *)addr)[i] != ((char *)opcode)[i]);
20754- local_irq_restore(flags);
20755+ BUG_ON((vaddr)[i] != ((const unsigned char *)opcode)[i]);
20756 return addr;
20757 }
20758
20759@@ -601,7 +615,7 @@ int poke_int3_handler(struct pt_regs *regs)
20760 if (likely(!bp_patching_in_progress))
20761 return 0;
20762
20763- if (user_mode_vm(regs) || regs->ip != (unsigned long)bp_int3_addr)
20764+ if (user_mode(regs) || regs->ip != (unsigned long)bp_int3_addr)
20765 return 0;
20766
20767 /* set up the specified breakpoint handler */
20768@@ -635,7 +649,7 @@ int poke_int3_handler(struct pt_regs *regs)
20769 */
20770 void *text_poke_bp(void *addr, const void *opcode, size_t len, void *handler)
20771 {
20772- unsigned char int3 = 0xcc;
20773+ const unsigned char int3 = 0xcc;
20774
20775 bp_int3_handler = handler;
20776 bp_int3_addr = (u8 *)addr + sizeof(int3);
20777diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
20778index ad3639a..bd4253c 100644
20779--- a/arch/x86/kernel/apic/apic.c
20780+++ b/arch/x86/kernel/apic/apic.c
20781@@ -171,7 +171,7 @@ int first_system_vector = FIRST_SYSTEM_VECTOR;
20782 /*
20783 * Debug level, exported for io_apic.c
20784 */
20785-unsigned int apic_verbosity;
20786+int apic_verbosity;
20787
20788 int pic_mode;
20789
20790@@ -1918,7 +1918,7 @@ static inline void __smp_error_interrupt(struct pt_regs *regs)
20791 apic_write(APIC_ESR, 0);
20792 v = apic_read(APIC_ESR);
20793 ack_APIC_irq();
20794- atomic_inc(&irq_err_count);
20795+ atomic_inc_unchecked(&irq_err_count);
20796
20797 apic_printk(APIC_DEBUG, KERN_DEBUG "APIC error on CPU%d: %02x",
20798 smp_processor_id(), v);
20799diff --git a/arch/x86/kernel/apic/apic_flat_64.c b/arch/x86/kernel/apic/apic_flat_64.c
20800index de918c4..32eed23 100644
20801--- a/arch/x86/kernel/apic/apic_flat_64.c
20802+++ b/arch/x86/kernel/apic/apic_flat_64.c
20803@@ -154,7 +154,7 @@ static int flat_probe(void)
20804 return 1;
20805 }
20806
20807-static struct apic apic_flat = {
20808+static struct apic apic_flat __read_only = {
20809 .name = "flat",
20810 .probe = flat_probe,
20811 .acpi_madt_oem_check = flat_acpi_madt_oem_check,
20812@@ -260,7 +260,7 @@ static int physflat_probe(void)
20813 return 0;
20814 }
20815
20816-static struct apic apic_physflat = {
20817+static struct apic apic_physflat __read_only = {
20818
20819 .name = "physical flat",
20820 .probe = physflat_probe,
20821diff --git a/arch/x86/kernel/apic/apic_noop.c b/arch/x86/kernel/apic/apic_noop.c
20822index b205cdb..d8503ff 100644
20823--- a/arch/x86/kernel/apic/apic_noop.c
20824+++ b/arch/x86/kernel/apic/apic_noop.c
20825@@ -108,7 +108,7 @@ static void noop_apic_write(u32 reg, u32 v)
20826 WARN_ON_ONCE(cpu_has_apic && !disable_apic);
20827 }
20828
20829-struct apic apic_noop = {
20830+struct apic apic_noop __read_only = {
20831 .name = "noop",
20832 .probe = noop_probe,
20833 .acpi_madt_oem_check = NULL,
20834diff --git a/arch/x86/kernel/apic/bigsmp_32.c b/arch/x86/kernel/apic/bigsmp_32.c
20835index c4a8d63..fe893ac 100644
20836--- a/arch/x86/kernel/apic/bigsmp_32.c
20837+++ b/arch/x86/kernel/apic/bigsmp_32.c
20838@@ -147,7 +147,7 @@ static int probe_bigsmp(void)
20839 return dmi_bigsmp;
20840 }
20841
20842-static struct apic apic_bigsmp = {
20843+static struct apic apic_bigsmp __read_only = {
20844
20845 .name = "bigsmp",
20846 .probe = probe_bigsmp,
20847diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
20848index f4dc246..fbab133 100644
20849--- a/arch/x86/kernel/apic/io_apic.c
20850+++ b/arch/x86/kernel/apic/io_apic.c
20851@@ -1862,7 +1862,7 @@ int native_ioapic_set_affinity(struct irq_data *data,
20852 return ret;
20853 }
20854
20855-atomic_t irq_mis_count;
20856+atomic_unchecked_t irq_mis_count;
20857
20858 #ifdef CONFIG_GENERIC_PENDING_IRQ
20859 static bool io_apic_level_ack_pending(struct irq_cfg *cfg)
20860@@ -2003,7 +2003,7 @@ static void ack_ioapic_level(struct irq_data *data)
20861 * at the cpu.
20862 */
20863 if (!(v & (1 << (i & 0x1f)))) {
20864- atomic_inc(&irq_mis_count);
20865+ atomic_inc_unchecked(&irq_mis_count);
20866
20867 eoi_ioapic_irq(irq, cfg);
20868 }
20869@@ -2011,7 +2011,7 @@ static void ack_ioapic_level(struct irq_data *data)
20870 ioapic_irqd_unmask(data, cfg, masked);
20871 }
20872
20873-static struct irq_chip ioapic_chip __read_mostly = {
20874+static struct irq_chip ioapic_chip = {
20875 .name = "IO-APIC",
20876 .irq_startup = startup_ioapic_irq,
20877 .irq_mask = mask_ioapic_irq,
20878@@ -2070,7 +2070,7 @@ static void ack_lapic_irq(struct irq_data *data)
20879 ack_APIC_irq();
20880 }
20881
20882-static struct irq_chip lapic_chip __read_mostly = {
20883+static struct irq_chip lapic_chip = {
20884 .name = "local-APIC",
20885 .irq_mask = mask_lapic_irq,
20886 .irq_unmask = unmask_lapic_irq,
20887diff --git a/arch/x86/kernel/apic/probe_32.c b/arch/x86/kernel/apic/probe_32.c
20888index bda4886..f9c7195 100644
20889--- a/arch/x86/kernel/apic/probe_32.c
20890+++ b/arch/x86/kernel/apic/probe_32.c
20891@@ -72,7 +72,7 @@ static int probe_default(void)
20892 return 1;
20893 }
20894
20895-static struct apic apic_default = {
20896+static struct apic apic_default __read_only = {
20897
20898 .name = "default",
20899 .probe = probe_default,
20900diff --git a/arch/x86/kernel/apic/vector.c b/arch/x86/kernel/apic/vector.c
20901index 6cedd79..023ff8e 100644
20902--- a/arch/x86/kernel/apic/vector.c
20903+++ b/arch/x86/kernel/apic/vector.c
20904@@ -21,7 +21,7 @@
20905
20906 static DEFINE_RAW_SPINLOCK(vector_lock);
20907
20908-void lock_vector_lock(void)
20909+void lock_vector_lock(void) __acquires(vector_lock)
20910 {
20911 /* Used to the online set of cpus does not change
20912 * during assign_irq_vector.
20913@@ -29,7 +29,7 @@ void lock_vector_lock(void)
20914 raw_spin_lock(&vector_lock);
20915 }
20916
20917-void unlock_vector_lock(void)
20918+void unlock_vector_lock(void) __releases(vector_lock)
20919 {
20920 raw_spin_unlock(&vector_lock);
20921 }
20922diff --git a/arch/x86/kernel/apic/x2apic_cluster.c b/arch/x86/kernel/apic/x2apic_cluster.c
20923index e658f21..b695a1a 100644
20924--- a/arch/x86/kernel/apic/x2apic_cluster.c
20925+++ b/arch/x86/kernel/apic/x2apic_cluster.c
20926@@ -182,7 +182,7 @@ update_clusterinfo(struct notifier_block *nfb, unsigned long action, void *hcpu)
20927 return notifier_from_errno(err);
20928 }
20929
20930-static struct notifier_block __refdata x2apic_cpu_notifier = {
20931+static struct notifier_block x2apic_cpu_notifier = {
20932 .notifier_call = update_clusterinfo,
20933 };
20934
20935@@ -234,7 +234,7 @@ static void cluster_vector_allocation_domain(int cpu, struct cpumask *retmask,
20936 cpumask_and(retmask, mask, per_cpu(cpus_in_cluster, cpu));
20937 }
20938
20939-static struct apic apic_x2apic_cluster = {
20940+static struct apic apic_x2apic_cluster __read_only = {
20941
20942 .name = "cluster x2apic",
20943 .probe = x2apic_cluster_probe,
20944diff --git a/arch/x86/kernel/apic/x2apic_phys.c b/arch/x86/kernel/apic/x2apic_phys.c
20945index 6fae733..5ca17af 100644
20946--- a/arch/x86/kernel/apic/x2apic_phys.c
20947+++ b/arch/x86/kernel/apic/x2apic_phys.c
20948@@ -88,7 +88,7 @@ static int x2apic_phys_probe(void)
20949 return apic == &apic_x2apic_phys;
20950 }
20951
20952-static struct apic apic_x2apic_phys = {
20953+static struct apic apic_x2apic_phys __read_only = {
20954
20955 .name = "physical x2apic",
20956 .probe = x2apic_phys_probe,
20957diff --git a/arch/x86/kernel/apic/x2apic_uv_x.c b/arch/x86/kernel/apic/x2apic_uv_x.c
20958index 8e9dcfd..c61b3e4 100644
20959--- a/arch/x86/kernel/apic/x2apic_uv_x.c
20960+++ b/arch/x86/kernel/apic/x2apic_uv_x.c
20961@@ -348,7 +348,7 @@ static int uv_probe(void)
20962 return apic == &apic_x2apic_uv_x;
20963 }
20964
20965-static struct apic __refdata apic_x2apic_uv_x = {
20966+static struct apic apic_x2apic_uv_x __read_only = {
20967
20968 .name = "UV large system",
20969 .probe = uv_probe,
20970diff --git a/arch/x86/kernel/apm_32.c b/arch/x86/kernel/apm_32.c
20971index 927ec92..de68f32 100644
20972--- a/arch/x86/kernel/apm_32.c
20973+++ b/arch/x86/kernel/apm_32.c
20974@@ -432,7 +432,7 @@ static DEFINE_MUTEX(apm_mutex);
20975 * This is for buggy BIOS's that refer to (real mode) segment 0x40
20976 * even though they are called in protected mode.
20977 */
20978-static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
20979+static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
20980 (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
20981
20982 static const char driver_version[] = "1.16ac"; /* no spaces */
20983@@ -610,7 +610,10 @@ static long __apm_bios_call(void *_call)
20984 BUG_ON(cpu != 0);
20985 gdt = get_cpu_gdt_table(cpu);
20986 save_desc_40 = gdt[0x40 / 8];
20987+
20988+ pax_open_kernel();
20989 gdt[0x40 / 8] = bad_bios_desc;
20990+ pax_close_kernel();
20991
20992 apm_irq_save(flags);
20993 APM_DO_SAVE_SEGS;
20994@@ -619,7 +622,11 @@ static long __apm_bios_call(void *_call)
20995 &call->esi);
20996 APM_DO_RESTORE_SEGS;
20997 apm_irq_restore(flags);
20998+
20999+ pax_open_kernel();
21000 gdt[0x40 / 8] = save_desc_40;
21001+ pax_close_kernel();
21002+
21003 put_cpu();
21004
21005 return call->eax & 0xff;
21006@@ -686,7 +693,10 @@ static long __apm_bios_call_simple(void *_call)
21007 BUG_ON(cpu != 0);
21008 gdt = get_cpu_gdt_table(cpu);
21009 save_desc_40 = gdt[0x40 / 8];
21010+
21011+ pax_open_kernel();
21012 gdt[0x40 / 8] = bad_bios_desc;
21013+ pax_close_kernel();
21014
21015 apm_irq_save(flags);
21016 APM_DO_SAVE_SEGS;
21017@@ -694,7 +704,11 @@ static long __apm_bios_call_simple(void *_call)
21018 &call->eax);
21019 APM_DO_RESTORE_SEGS;
21020 apm_irq_restore(flags);
21021+
21022+ pax_open_kernel();
21023 gdt[0x40 / 8] = save_desc_40;
21024+ pax_close_kernel();
21025+
21026 put_cpu();
21027 return error;
21028 }
21029@@ -2039,7 +2053,7 @@ static int __init swab_apm_power_in_minutes(const struct dmi_system_id *d)
21030 return 0;
21031 }
21032
21033-static struct dmi_system_id __initdata apm_dmi_table[] = {
21034+static const struct dmi_system_id __initconst apm_dmi_table[] = {
21035 {
21036 print_if_true,
21037 KERN_WARNING "IBM T23 - BIOS 1.03b+ and controller firmware 1.02+ may be needed for Linux APM.",
21038@@ -2349,12 +2363,15 @@ static int __init apm_init(void)
21039 * code to that CPU.
21040 */
21041 gdt = get_cpu_gdt_table(0);
21042+
21043+ pax_open_kernel();
21044 set_desc_base(&gdt[APM_CS >> 3],
21045 (unsigned long)__va((unsigned long)apm_info.bios.cseg << 4));
21046 set_desc_base(&gdt[APM_CS_16 >> 3],
21047 (unsigned long)__va((unsigned long)apm_info.bios.cseg_16 << 4));
21048 set_desc_base(&gdt[APM_DS >> 3],
21049 (unsigned long)__va((unsigned long)apm_info.bios.dseg << 4));
21050+ pax_close_kernel();
21051
21052 proc_create("apm", 0, NULL, &apm_file_ops);
21053
21054diff --git a/arch/x86/kernel/asm-offsets.c b/arch/x86/kernel/asm-offsets.c
21055index 9f6b934..cf5ffb3 100644
21056--- a/arch/x86/kernel/asm-offsets.c
21057+++ b/arch/x86/kernel/asm-offsets.c
21058@@ -32,6 +32,8 @@ void common(void) {
21059 OFFSET(TI_flags, thread_info, flags);
21060 OFFSET(TI_status, thread_info, status);
21061 OFFSET(TI_addr_limit, thread_info, addr_limit);
21062+ OFFSET(TI_lowest_stack, thread_info, lowest_stack);
21063+ DEFINE(TI_task_thread_sp0, offsetof(struct task_struct, thread.sp0) - offsetof(struct task_struct, tinfo));
21064
21065 BLANK();
21066 OFFSET(crypto_tfm_ctx_offset, crypto_tfm, __crt_ctx);
21067@@ -52,8 +54,26 @@ void common(void) {
21068 OFFSET(PV_CPU_irq_enable_sysexit, pv_cpu_ops, irq_enable_sysexit);
21069 OFFSET(PV_CPU_read_cr0, pv_cpu_ops, read_cr0);
21070 OFFSET(PV_MMU_read_cr2, pv_mmu_ops, read_cr2);
21071+
21072+#ifdef CONFIG_PAX_KERNEXEC
21073+ OFFSET(PV_CPU_write_cr0, pv_cpu_ops, write_cr0);
21074 #endif
21075
21076+#ifdef CONFIG_PAX_MEMORY_UDEREF
21077+ OFFSET(PV_MMU_read_cr3, pv_mmu_ops, read_cr3);
21078+ OFFSET(PV_MMU_write_cr3, pv_mmu_ops, write_cr3);
21079+#ifdef CONFIG_X86_64
21080+ OFFSET(PV_MMU_set_pgd_batched, pv_mmu_ops, set_pgd_batched);
21081+#endif
21082+#endif
21083+
21084+#endif
21085+
21086+ BLANK();
21087+ DEFINE(PAGE_SIZE_asm, PAGE_SIZE);
21088+ DEFINE(PAGE_SHIFT_asm, PAGE_SHIFT);
21089+ DEFINE(THREAD_SIZE_asm, THREAD_SIZE);
21090+
21091 #ifdef CONFIG_XEN
21092 BLANK();
21093 OFFSET(XEN_vcpu_info_mask, vcpu_info, evtchn_upcall_mask);
21094diff --git a/arch/x86/kernel/asm-offsets_64.c b/arch/x86/kernel/asm-offsets_64.c
21095index fdcbb4d..036dd93 100644
21096--- a/arch/x86/kernel/asm-offsets_64.c
21097+++ b/arch/x86/kernel/asm-offsets_64.c
21098@@ -80,6 +80,7 @@ int main(void)
21099 BLANK();
21100 #undef ENTRY
21101
21102+ DEFINE(TSS_size, sizeof(struct tss_struct));
21103 OFFSET(TSS_ist, tss_struct, x86_tss.ist);
21104 BLANK();
21105
21106diff --git a/arch/x86/kernel/cpu/Makefile b/arch/x86/kernel/cpu/Makefile
21107index 80091ae..0c5184f 100644
21108--- a/arch/x86/kernel/cpu/Makefile
21109+++ b/arch/x86/kernel/cpu/Makefile
21110@@ -8,10 +8,6 @@ CFLAGS_REMOVE_common.o = -pg
21111 CFLAGS_REMOVE_perf_event.o = -pg
21112 endif
21113
21114-# Make sure load_percpu_segment has no stackprotector
21115-nostackp := $(call cc-option, -fno-stack-protector)
21116-CFLAGS_common.o := $(nostackp)
21117-
21118 obj-y := intel_cacheinfo.o scattered.o topology.o
21119 obj-y += common.o
21120 obj-y += rdrand.o
21121diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
21122index a220239..607fc38 100644
21123--- a/arch/x86/kernel/cpu/amd.c
21124+++ b/arch/x86/kernel/cpu/amd.c
21125@@ -717,7 +717,7 @@ static void init_amd(struct cpuinfo_x86 *c)
21126 static unsigned int amd_size_cache(struct cpuinfo_x86 *c, unsigned int size)
21127 {
21128 /* AMD errata T13 (order #21922) */
21129- if ((c->x86 == 6)) {
21130+ if (c->x86 == 6) {
21131 /* Duron Rev A0 */
21132 if (c->x86_model == 3 && c->x86_mask == 0)
21133 size = 64;
21134diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
21135index 2346c95..c061472 100644
21136--- a/arch/x86/kernel/cpu/common.c
21137+++ b/arch/x86/kernel/cpu/common.c
21138@@ -91,60 +91,6 @@ static const struct cpu_dev default_cpu = {
21139
21140 static const struct cpu_dev *this_cpu = &default_cpu;
21141
21142-DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = {
21143-#ifdef CONFIG_X86_64
21144- /*
21145- * We need valid kernel segments for data and code in long mode too
21146- * IRET will check the segment types kkeil 2000/10/28
21147- * Also sysret mandates a special GDT layout
21148- *
21149- * TLS descriptors are currently at a different place compared to i386.
21150- * Hopefully nobody expects them at a fixed place (Wine?)
21151- */
21152- [GDT_ENTRY_KERNEL32_CS] = GDT_ENTRY_INIT(0xc09b, 0, 0xfffff),
21153- [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xa09b, 0, 0xfffff),
21154- [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc093, 0, 0xfffff),
21155- [GDT_ENTRY_DEFAULT_USER32_CS] = GDT_ENTRY_INIT(0xc0fb, 0, 0xfffff),
21156- [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f3, 0, 0xfffff),
21157- [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xa0fb, 0, 0xfffff),
21158-#else
21159- [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xc09a, 0, 0xfffff),
21160- [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
21161- [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xc0fa, 0, 0xfffff),
21162- [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f2, 0, 0xfffff),
21163- /*
21164- * Segments used for calling PnP BIOS have byte granularity.
21165- * They code segments and data segments have fixed 64k limits,
21166- * the transfer segment sizes are set at run time.
21167- */
21168- /* 32-bit code */
21169- [GDT_ENTRY_PNPBIOS_CS32] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
21170- /* 16-bit code */
21171- [GDT_ENTRY_PNPBIOS_CS16] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
21172- /* 16-bit data */
21173- [GDT_ENTRY_PNPBIOS_DS] = GDT_ENTRY_INIT(0x0092, 0, 0xffff),
21174- /* 16-bit data */
21175- [GDT_ENTRY_PNPBIOS_TS1] = GDT_ENTRY_INIT(0x0092, 0, 0),
21176- /* 16-bit data */
21177- [GDT_ENTRY_PNPBIOS_TS2] = GDT_ENTRY_INIT(0x0092, 0, 0),
21178- /*
21179- * The APM segments have byte granularity and their bases
21180- * are set at run time. All have 64k limits.
21181- */
21182- /* 32-bit code */
21183- [GDT_ENTRY_APMBIOS_BASE] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
21184- /* 16-bit code */
21185- [GDT_ENTRY_APMBIOS_BASE+1] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
21186- /* data */
21187- [GDT_ENTRY_APMBIOS_BASE+2] = GDT_ENTRY_INIT(0x4092, 0, 0xffff),
21188-
21189- [GDT_ENTRY_ESPFIX_SS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
21190- [GDT_ENTRY_PERCPU] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
21191- GDT_STACK_CANARY_INIT
21192-#endif
21193-} };
21194-EXPORT_PER_CPU_SYMBOL_GPL(gdt_page);
21195-
21196 static int __init x86_xsave_setup(char *s)
21197 {
21198 if (strlen(s))
21199@@ -306,6 +252,59 @@ static __always_inline void setup_smap(struct cpuinfo_x86 *c)
21200 }
21201 }
21202
21203+#ifdef CONFIG_X86_64
21204+static __init int setup_disable_pcid(char *arg)
21205+{
21206+ setup_clear_cpu_cap(X86_FEATURE_PCID);
21207+ setup_clear_cpu_cap(X86_FEATURE_INVPCID);
21208+
21209+#ifdef CONFIG_PAX_MEMORY_UDEREF
21210+ if (clone_pgd_mask != ~(pgdval_t)0UL)
21211+ pax_user_shadow_base = 1UL << TASK_SIZE_MAX_SHIFT;
21212+#endif
21213+
21214+ return 1;
21215+}
21216+__setup("nopcid", setup_disable_pcid);
21217+
21218+static void setup_pcid(struct cpuinfo_x86 *c)
21219+{
21220+ if (!cpu_has(c, X86_FEATURE_PCID)) {
21221+ clear_cpu_cap(c, X86_FEATURE_INVPCID);
21222+
21223+#ifdef CONFIG_PAX_MEMORY_UDEREF
21224+ if (clone_pgd_mask != ~(pgdval_t)0UL) {
21225+ pax_open_kernel();
21226+ pax_user_shadow_base = 1UL << TASK_SIZE_MAX_SHIFT;
21227+ pax_close_kernel();
21228+ printk("PAX: slow and weak UDEREF enabled\n");
21229+ } else
21230+ printk("PAX: UDEREF disabled\n");
21231+#endif
21232+
21233+ return;
21234+ }
21235+
21236+ printk("PAX: PCID detected\n");
21237+ cr4_set_bits(X86_CR4_PCIDE);
21238+
21239+#ifdef CONFIG_PAX_MEMORY_UDEREF
21240+ pax_open_kernel();
21241+ clone_pgd_mask = ~(pgdval_t)0UL;
21242+ pax_close_kernel();
21243+ if (pax_user_shadow_base)
21244+ printk("PAX: weak UDEREF enabled\n");
21245+ else {
21246+ set_cpu_cap(c, X86_FEATURE_STRONGUDEREF);
21247+ printk("PAX: strong UDEREF enabled\n");
21248+ }
21249+#endif
21250+
21251+ if (cpu_has(c, X86_FEATURE_INVPCID))
21252+ printk("PAX: INVPCID detected\n");
21253+}
21254+#endif
21255+
21256 /*
21257 * Some CPU features depend on higher CPUID levels, which may not always
21258 * be available due to CPUID level capping or broken virtualization
21259@@ -406,7 +405,7 @@ void switch_to_new_gdt(int cpu)
21260 {
21261 struct desc_ptr gdt_descr;
21262
21263- gdt_descr.address = (long)get_cpu_gdt_table(cpu);
21264+ gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
21265 gdt_descr.size = GDT_SIZE - 1;
21266 load_gdt(&gdt_descr);
21267 /* Reload the per-cpu base */
21268@@ -897,6 +896,20 @@ static void identify_cpu(struct cpuinfo_x86 *c)
21269 setup_smep(c);
21270 setup_smap(c);
21271
21272+#ifdef CONFIG_X86_32
21273+#ifdef CONFIG_PAX_PAGEEXEC
21274+ if (!(__supported_pte_mask & _PAGE_NX))
21275+ clear_cpu_cap(c, X86_FEATURE_PSE);
21276+#endif
21277+#if defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
21278+ clear_cpu_cap(c, X86_FEATURE_SEP);
21279+#endif
21280+#endif
21281+
21282+#ifdef CONFIG_X86_64
21283+ setup_pcid(c);
21284+#endif
21285+
21286 /*
21287 * The vendor-specific functions might have changed features.
21288 * Now we do "generic changes."
21289@@ -979,7 +992,7 @@ static void syscall32_cpu_init(void)
21290 void enable_sep_cpu(void)
21291 {
21292 int cpu = get_cpu();
21293- struct tss_struct *tss = &per_cpu(init_tss, cpu);
21294+ struct tss_struct *tss = init_tss + cpu;
21295
21296 if (!boot_cpu_has(X86_FEATURE_SEP)) {
21297 put_cpu();
21298@@ -1117,14 +1130,16 @@ static __init int setup_disablecpuid(char *arg)
21299 }
21300 __setup("clearcpuid=", setup_disablecpuid);
21301
21302+DEFINE_PER_CPU(struct thread_info *, current_tinfo) = &init_task.tinfo;
21303+EXPORT_PER_CPU_SYMBOL(current_tinfo);
21304+
21305 DEFINE_PER_CPU(unsigned long, kernel_stack) =
21306- (unsigned long)&init_thread_union - KERNEL_STACK_OFFSET + THREAD_SIZE;
21307+ (unsigned long)&init_thread_union - 16 + THREAD_SIZE;
21308 EXPORT_PER_CPU_SYMBOL(kernel_stack);
21309
21310 #ifdef CONFIG_X86_64
21311-struct desc_ptr idt_descr = { NR_VECTORS * 16 - 1, (unsigned long) idt_table };
21312-struct desc_ptr debug_idt_descr = { NR_VECTORS * 16 - 1,
21313- (unsigned long) debug_idt_table };
21314+struct desc_ptr idt_descr __read_only = { NR_VECTORS * 16 - 1, (unsigned long) idt_table };
21315+const struct desc_ptr debug_idt_descr = { NR_VECTORS * 16 - 1, (unsigned long) debug_idt_table };
21316
21317 DEFINE_PER_CPU_FIRST(union irq_stack_union,
21318 irq_stack_union) __aligned(PAGE_SIZE) __visible;
21319@@ -1307,7 +1322,7 @@ void cpu_init(void)
21320 */
21321 load_ucode_ap();
21322
21323- t = &per_cpu(init_tss, cpu);
21324+ t = init_tss + cpu;
21325 oist = &per_cpu(orig_ist, cpu);
21326
21327 #ifdef CONFIG_NUMA
21328@@ -1339,7 +1354,6 @@ void cpu_init(void)
21329 wrmsrl(MSR_KERNEL_GS_BASE, 0);
21330 barrier();
21331
21332- x86_configure_nx();
21333 x2apic_setup();
21334
21335 /*
21336@@ -1391,7 +1405,7 @@ void cpu_init(void)
21337 {
21338 int cpu = smp_processor_id();
21339 struct task_struct *curr = current;
21340- struct tss_struct *t = &per_cpu(init_tss, cpu);
21341+ struct tss_struct *t = init_tss + cpu;
21342 struct thread_struct *thread = &curr->thread;
21343
21344 wait_for_master_cpu(cpu);
21345diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c
21346index 6596433..1ad6eaf 100644
21347--- a/arch/x86/kernel/cpu/intel_cacheinfo.c
21348+++ b/arch/x86/kernel/cpu/intel_cacheinfo.c
21349@@ -1024,6 +1024,22 @@ static struct attribute *default_attrs[] = {
21350 };
21351
21352 #ifdef CONFIG_AMD_NB
21353+static struct attribute *default_attrs_amd_nb[] = {
21354+ &type.attr,
21355+ &level.attr,
21356+ &coherency_line_size.attr,
21357+ &physical_line_partition.attr,
21358+ &ways_of_associativity.attr,
21359+ &number_of_sets.attr,
21360+ &size.attr,
21361+ &shared_cpu_map.attr,
21362+ &shared_cpu_list.attr,
21363+ NULL,
21364+ NULL,
21365+ NULL,
21366+ NULL
21367+};
21368+
21369 static struct attribute **amd_l3_attrs(void)
21370 {
21371 static struct attribute **attrs;
21372@@ -1034,18 +1050,7 @@ static struct attribute **amd_l3_attrs(void)
21373
21374 n = ARRAY_SIZE(default_attrs);
21375
21376- if (amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE))
21377- n += 2;
21378-
21379- if (amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
21380- n += 1;
21381-
21382- attrs = kzalloc(n * sizeof (struct attribute *), GFP_KERNEL);
21383- if (attrs == NULL)
21384- return attrs = default_attrs;
21385-
21386- for (n = 0; default_attrs[n]; n++)
21387- attrs[n] = default_attrs[n];
21388+ attrs = default_attrs_amd_nb;
21389
21390 if (amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE)) {
21391 attrs[n++] = &cache_disable_0.attr;
21392@@ -1096,6 +1101,13 @@ static struct kobj_type ktype_cache = {
21393 .default_attrs = default_attrs,
21394 };
21395
21396+#ifdef CONFIG_AMD_NB
21397+static struct kobj_type ktype_cache_amd_nb = {
21398+ .sysfs_ops = &sysfs_ops,
21399+ .default_attrs = default_attrs_amd_nb,
21400+};
21401+#endif
21402+
21403 static struct kobj_type ktype_percpu_entry = {
21404 .sysfs_ops = &sysfs_ops,
21405 };
21406@@ -1161,20 +1173,26 @@ static int cache_add_dev(struct device *dev)
21407 return retval;
21408 }
21409
21410+#ifdef CONFIG_AMD_NB
21411+ amd_l3_attrs();
21412+#endif
21413+
21414 for (i = 0; i < num_cache_leaves; i++) {
21415+ struct kobj_type *ktype;
21416+
21417 this_object = INDEX_KOBJECT_PTR(cpu, i);
21418 this_object->cpu = cpu;
21419 this_object->index = i;
21420
21421 this_leaf = CPUID4_INFO_IDX(cpu, i);
21422
21423- ktype_cache.default_attrs = default_attrs;
21424+ ktype = &ktype_cache;
21425 #ifdef CONFIG_AMD_NB
21426 if (this_leaf->base.nb)
21427- ktype_cache.default_attrs = amd_l3_attrs();
21428+ ktype = &ktype_cache_amd_nb;
21429 #endif
21430 retval = kobject_init_and_add(&(this_object->kobj),
21431- &ktype_cache,
21432+ ktype,
21433 per_cpu(ici_cache_kobject, cpu),
21434 "index%1lu", i);
21435 if (unlikely(retval)) {
21436diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
21437index 3c036cb..3b5677d 100644
21438--- a/arch/x86/kernel/cpu/mcheck/mce.c
21439+++ b/arch/x86/kernel/cpu/mcheck/mce.c
21440@@ -47,6 +47,7 @@
21441 #include <asm/tlbflush.h>
21442 #include <asm/mce.h>
21443 #include <asm/msr.h>
21444+#include <asm/local.h>
21445
21446 #include "mce-internal.h"
21447
21448@@ -258,7 +259,7 @@ static void print_mce(struct mce *m)
21449 !(m->mcgstatus & MCG_STATUS_EIPV) ? " !INEXACT!" : "",
21450 m->cs, m->ip);
21451
21452- if (m->cs == __KERNEL_CS)
21453+ if (m->cs == __KERNEL_CS || m->cs == __KERNEXEC_KERNEL_CS)
21454 print_symbol("{%s}", m->ip);
21455 pr_cont("\n");
21456 }
21457@@ -291,10 +292,10 @@ static void print_mce(struct mce *m)
21458
21459 #define PANIC_TIMEOUT 5 /* 5 seconds */
21460
21461-static atomic_t mce_panicked;
21462+static atomic_unchecked_t mce_panicked;
21463
21464 static int fake_panic;
21465-static atomic_t mce_fake_panicked;
21466+static atomic_unchecked_t mce_fake_panicked;
21467
21468 /* Panic in progress. Enable interrupts and wait for final IPI */
21469 static void wait_for_panic(void)
21470@@ -318,7 +319,7 @@ static void mce_panic(const char *msg, struct mce *final, char *exp)
21471 /*
21472 * Make sure only one CPU runs in machine check panic
21473 */
21474- if (atomic_inc_return(&mce_panicked) > 1)
21475+ if (atomic_inc_return_unchecked(&mce_panicked) > 1)
21476 wait_for_panic();
21477 barrier();
21478
21479@@ -326,7 +327,7 @@ static void mce_panic(const char *msg, struct mce *final, char *exp)
21480 console_verbose();
21481 } else {
21482 /* Don't log too much for fake panic */
21483- if (atomic_inc_return(&mce_fake_panicked) > 1)
21484+ if (atomic_inc_return_unchecked(&mce_fake_panicked) > 1)
21485 return;
21486 }
21487 /* First print corrected ones that are still unlogged */
21488@@ -365,7 +366,7 @@ static void mce_panic(const char *msg, struct mce *final, char *exp)
21489 if (!fake_panic) {
21490 if (panic_timeout == 0)
21491 panic_timeout = mca_cfg.panic_timeout;
21492- panic(msg);
21493+ panic("%s", msg);
21494 } else
21495 pr_emerg(HW_ERR "Fake kernel panic: %s\n", msg);
21496 }
21497@@ -743,7 +744,7 @@ static int mce_timed_out(u64 *t, const char *msg)
21498 * might have been modified by someone else.
21499 */
21500 rmb();
21501- if (atomic_read(&mce_panicked))
21502+ if (atomic_read_unchecked(&mce_panicked))
21503 wait_for_panic();
21504 if (!mca_cfg.monarch_timeout)
21505 goto out;
21506@@ -1669,7 +1670,7 @@ static void unexpected_machine_check(struct pt_regs *regs, long error_code)
21507 }
21508
21509 /* Call the installed machine check handler for this CPU setup. */
21510-void (*machine_check_vector)(struct pt_regs *, long error_code) =
21511+void (*machine_check_vector)(struct pt_regs *, long error_code) __read_only =
21512 unexpected_machine_check;
21513
21514 /*
21515@@ -1692,7 +1693,9 @@ void mcheck_cpu_init(struct cpuinfo_x86 *c)
21516 return;
21517 }
21518
21519+ pax_open_kernel();
21520 machine_check_vector = do_machine_check;
21521+ pax_close_kernel();
21522
21523 __mcheck_cpu_init_generic();
21524 __mcheck_cpu_init_vendor(c);
21525@@ -1706,7 +1709,7 @@ void mcheck_cpu_init(struct cpuinfo_x86 *c)
21526 */
21527
21528 static DEFINE_SPINLOCK(mce_chrdev_state_lock);
21529-static int mce_chrdev_open_count; /* #times opened */
21530+static local_t mce_chrdev_open_count; /* #times opened */
21531 static int mce_chrdev_open_exclu; /* already open exclusive? */
21532
21533 static int mce_chrdev_open(struct inode *inode, struct file *file)
21534@@ -1714,7 +1717,7 @@ static int mce_chrdev_open(struct inode *inode, struct file *file)
21535 spin_lock(&mce_chrdev_state_lock);
21536
21537 if (mce_chrdev_open_exclu ||
21538- (mce_chrdev_open_count && (file->f_flags & O_EXCL))) {
21539+ (local_read(&mce_chrdev_open_count) && (file->f_flags & O_EXCL))) {
21540 spin_unlock(&mce_chrdev_state_lock);
21541
21542 return -EBUSY;
21543@@ -1722,7 +1725,7 @@ static int mce_chrdev_open(struct inode *inode, struct file *file)
21544
21545 if (file->f_flags & O_EXCL)
21546 mce_chrdev_open_exclu = 1;
21547- mce_chrdev_open_count++;
21548+ local_inc(&mce_chrdev_open_count);
21549
21550 spin_unlock(&mce_chrdev_state_lock);
21551
21552@@ -1733,7 +1736,7 @@ static int mce_chrdev_release(struct inode *inode, struct file *file)
21553 {
21554 spin_lock(&mce_chrdev_state_lock);
21555
21556- mce_chrdev_open_count--;
21557+ local_dec(&mce_chrdev_open_count);
21558 mce_chrdev_open_exclu = 0;
21559
21560 spin_unlock(&mce_chrdev_state_lock);
21561@@ -2408,7 +2411,7 @@ static __init void mce_init_banks(void)
21562
21563 for (i = 0; i < mca_cfg.banks; i++) {
21564 struct mce_bank *b = &mce_banks[i];
21565- struct device_attribute *a = &b->attr;
21566+ device_attribute_no_const *a = &b->attr;
21567
21568 sysfs_attr_init(&a->attr);
21569 a->attr.name = b->attrname;
21570@@ -2515,7 +2518,7 @@ struct dentry *mce_get_debugfs_dir(void)
21571 static void mce_reset(void)
21572 {
21573 cpu_missing = 0;
21574- atomic_set(&mce_fake_panicked, 0);
21575+ atomic_set_unchecked(&mce_fake_panicked, 0);
21576 atomic_set(&mce_executing, 0);
21577 atomic_set(&mce_callin, 0);
21578 atomic_set(&global_nwo, 0);
21579diff --git a/arch/x86/kernel/cpu/mcheck/p5.c b/arch/x86/kernel/cpu/mcheck/p5.c
21580index 737b0ad..09ec66e 100644
21581--- a/arch/x86/kernel/cpu/mcheck/p5.c
21582+++ b/arch/x86/kernel/cpu/mcheck/p5.c
21583@@ -12,6 +12,7 @@
21584 #include <asm/tlbflush.h>
21585 #include <asm/mce.h>
21586 #include <asm/msr.h>
21587+#include <asm/pgtable.h>
21588
21589 /* By default disabled */
21590 int mce_p5_enabled __read_mostly;
21591@@ -55,7 +56,9 @@ void intel_p5_mcheck_init(struct cpuinfo_x86 *c)
21592 if (!cpu_has(c, X86_FEATURE_MCE))
21593 return;
21594
21595+ pax_open_kernel();
21596 machine_check_vector = pentium_machine_check;
21597+ pax_close_kernel();
21598 /* Make sure the vector pointer is visible before we enable MCEs: */
21599 wmb();
21600
21601diff --git a/arch/x86/kernel/cpu/mcheck/winchip.c b/arch/x86/kernel/cpu/mcheck/winchip.c
21602index 44f1382..315b292 100644
21603--- a/arch/x86/kernel/cpu/mcheck/winchip.c
21604+++ b/arch/x86/kernel/cpu/mcheck/winchip.c
21605@@ -11,6 +11,7 @@
21606 #include <asm/tlbflush.h>
21607 #include <asm/mce.h>
21608 #include <asm/msr.h>
21609+#include <asm/pgtable.h>
21610
21611 /* Machine check handler for WinChip C6: */
21612 static void winchip_machine_check(struct pt_regs *regs, long error_code)
21613@@ -28,7 +29,9 @@ void winchip_mcheck_init(struct cpuinfo_x86 *c)
21614 {
21615 u32 lo, hi;
21616
21617+ pax_open_kernel();
21618 machine_check_vector = winchip_machine_check;
21619+ pax_close_kernel();
21620 /* Make sure the vector pointer is visible before we enable MCEs: */
21621 wmb();
21622
21623diff --git a/arch/x86/kernel/cpu/microcode/core.c b/arch/x86/kernel/cpu/microcode/core.c
21624index 36a8361..e7058c2 100644
21625--- a/arch/x86/kernel/cpu/microcode/core.c
21626+++ b/arch/x86/kernel/cpu/microcode/core.c
21627@@ -518,7 +518,7 @@ mc_cpu_callback(struct notifier_block *nb, unsigned long action, void *hcpu)
21628 return NOTIFY_OK;
21629 }
21630
21631-static struct notifier_block __refdata mc_cpu_notifier = {
21632+static struct notifier_block mc_cpu_notifier = {
21633 .notifier_call = mc_cpu_callback,
21634 };
21635
21636diff --git a/arch/x86/kernel/cpu/microcode/intel.c b/arch/x86/kernel/cpu/microcode/intel.c
21637index 746e7fd..8dc677e 100644
21638--- a/arch/x86/kernel/cpu/microcode/intel.c
21639+++ b/arch/x86/kernel/cpu/microcode/intel.c
21640@@ -298,13 +298,13 @@ static enum ucode_state request_microcode_fw(int cpu, struct device *device,
21641
21642 static int get_ucode_user(void *to, const void *from, size_t n)
21643 {
21644- return copy_from_user(to, from, n);
21645+ return copy_from_user(to, (const void __force_user *)from, n);
21646 }
21647
21648 static enum ucode_state
21649 request_microcode_user(int cpu, const void __user *buf, size_t size)
21650 {
21651- return generic_load_microcode(cpu, (void *)buf, size, &get_ucode_user);
21652+ return generic_load_microcode(cpu, (__force_kernel void *)buf, size, &get_ucode_user);
21653 }
21654
21655 static void microcode_fini_cpu(int cpu)
21656diff --git a/arch/x86/kernel/cpu/mtrr/main.c b/arch/x86/kernel/cpu/mtrr/main.c
21657index ea5f363..cb0e905 100644
21658--- a/arch/x86/kernel/cpu/mtrr/main.c
21659+++ b/arch/x86/kernel/cpu/mtrr/main.c
21660@@ -66,7 +66,7 @@ static DEFINE_MUTEX(mtrr_mutex);
21661 u64 size_or_mask, size_and_mask;
21662 static bool mtrr_aps_delayed_init;
21663
21664-static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM];
21665+static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM] __read_only;
21666
21667 const struct mtrr_ops *mtrr_if;
21668
21669diff --git a/arch/x86/kernel/cpu/mtrr/mtrr.h b/arch/x86/kernel/cpu/mtrr/mtrr.h
21670index df5e41f..816c719 100644
21671--- a/arch/x86/kernel/cpu/mtrr/mtrr.h
21672+++ b/arch/x86/kernel/cpu/mtrr/mtrr.h
21673@@ -25,7 +25,7 @@ struct mtrr_ops {
21674 int (*validate_add_page)(unsigned long base, unsigned long size,
21675 unsigned int type);
21676 int (*have_wrcomb)(void);
21677-};
21678+} __do_const;
21679
21680 extern int generic_get_free_region(unsigned long base, unsigned long size,
21681 int replace_reg);
21682diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
21683index b71a7f8..534af0e 100644
21684--- a/arch/x86/kernel/cpu/perf_event.c
21685+++ b/arch/x86/kernel/cpu/perf_event.c
21686@@ -1376,7 +1376,7 @@ static void __init pmu_check_apic(void)
21687
21688 }
21689
21690-static struct attribute_group x86_pmu_format_group = {
21691+static attribute_group_no_const x86_pmu_format_group = {
21692 .name = "format",
21693 .attrs = NULL,
21694 };
21695@@ -1475,7 +1475,7 @@ static struct attribute *events_attr[] = {
21696 NULL,
21697 };
21698
21699-static struct attribute_group x86_pmu_events_group = {
21700+static attribute_group_no_const x86_pmu_events_group = {
21701 .name = "events",
21702 .attrs = events_attr,
21703 };
21704@@ -2037,7 +2037,7 @@ static unsigned long get_segment_base(unsigned int segment)
21705 if (idx > GDT_ENTRIES)
21706 return 0;
21707
21708- desc = raw_cpu_ptr(gdt_page.gdt);
21709+ desc = get_cpu_gdt_table(smp_processor_id());
21710 }
21711
21712 return get_desc_base(desc + idx);
21713@@ -2127,7 +2127,7 @@ perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
21714 break;
21715
21716 perf_callchain_store(entry, frame.return_address);
21717- fp = frame.next_frame;
21718+ fp = (const void __force_user *)frame.next_frame;
21719 }
21720 }
21721
21722diff --git a/arch/x86/kernel/cpu/perf_event_amd_iommu.c b/arch/x86/kernel/cpu/perf_event_amd_iommu.c
21723index 97242a9..cf9c30e 100644
21724--- a/arch/x86/kernel/cpu/perf_event_amd_iommu.c
21725+++ b/arch/x86/kernel/cpu/perf_event_amd_iommu.c
21726@@ -402,7 +402,7 @@ static void perf_iommu_del(struct perf_event *event, int flags)
21727 static __init int _init_events_attrs(struct perf_amd_iommu *perf_iommu)
21728 {
21729 struct attribute **attrs;
21730- struct attribute_group *attr_group;
21731+ attribute_group_no_const *attr_group;
21732 int i = 0, j;
21733
21734 while (amd_iommu_v2_event_descs[i].attr.attr.name)
21735diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
21736index 2589906..1ca1000 100644
21737--- a/arch/x86/kernel/cpu/perf_event_intel.c
21738+++ b/arch/x86/kernel/cpu/perf_event_intel.c
21739@@ -2353,10 +2353,10 @@ __init int intel_pmu_init(void)
21740 x86_pmu.num_counters_fixed = max((int)edx.split.num_counters_fixed, 3);
21741
21742 if (boot_cpu_has(X86_FEATURE_PDCM)) {
21743- u64 capabilities;
21744+ u64 capabilities = x86_pmu.intel_cap.capabilities;
21745
21746- rdmsrl(MSR_IA32_PERF_CAPABILITIES, capabilities);
21747- x86_pmu.intel_cap.capabilities = capabilities;
21748+ if (rdmsrl_safe(MSR_IA32_PERF_CAPABILITIES, &x86_pmu.intel_cap.capabilities))
21749+ x86_pmu.intel_cap.capabilities = capabilities;
21750 }
21751
21752 intel_ds_init();
21753diff --git a/arch/x86/kernel/cpu/perf_event_intel_rapl.c b/arch/x86/kernel/cpu/perf_event_intel_rapl.c
21754index c4bb8b8..9f7384d 100644
21755--- a/arch/x86/kernel/cpu/perf_event_intel_rapl.c
21756+++ b/arch/x86/kernel/cpu/perf_event_intel_rapl.c
21757@@ -465,7 +465,7 @@ static struct attribute *rapl_events_hsw_attr[] = {
21758 NULL,
21759 };
21760
21761-static struct attribute_group rapl_pmu_events_group = {
21762+static attribute_group_no_const rapl_pmu_events_group __read_only = {
21763 .name = "events",
21764 .attrs = NULL, /* patched at runtime */
21765 };
21766diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore.c b/arch/x86/kernel/cpu/perf_event_intel_uncore.c
21767index c635b8b..b78835e 100644
21768--- a/arch/x86/kernel/cpu/perf_event_intel_uncore.c
21769+++ b/arch/x86/kernel/cpu/perf_event_intel_uncore.c
21770@@ -733,7 +733,7 @@ static void __init uncore_types_exit(struct intel_uncore_type **types)
21771 static int __init uncore_type_init(struct intel_uncore_type *type)
21772 {
21773 struct intel_uncore_pmu *pmus;
21774- struct attribute_group *attr_group;
21775+ attribute_group_no_const *attr_group;
21776 struct attribute **attrs;
21777 int i, j;
21778
21779diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore.h b/arch/x86/kernel/cpu/perf_event_intel_uncore.h
21780index 6c8c1e7..515b98a 100644
21781--- a/arch/x86/kernel/cpu/perf_event_intel_uncore.h
21782+++ b/arch/x86/kernel/cpu/perf_event_intel_uncore.h
21783@@ -114,7 +114,7 @@ struct intel_uncore_box {
21784 struct uncore_event_desc {
21785 struct kobj_attribute attr;
21786 const char *config;
21787-};
21788+} __do_const;
21789
21790 ssize_t uncore_event_show(struct kobject *kobj,
21791 struct kobj_attribute *attr, char *buf);
21792diff --git a/arch/x86/kernel/cpuid.c b/arch/x86/kernel/cpuid.c
21793index 83741a7..bd3507d 100644
21794--- a/arch/x86/kernel/cpuid.c
21795+++ b/arch/x86/kernel/cpuid.c
21796@@ -170,7 +170,7 @@ static int cpuid_class_cpu_callback(struct notifier_block *nfb,
21797 return notifier_from_errno(err);
21798 }
21799
21800-static struct notifier_block __refdata cpuid_class_cpu_notifier =
21801+static struct notifier_block cpuid_class_cpu_notifier =
21802 {
21803 .notifier_call = cpuid_class_cpu_callback,
21804 };
21805diff --git a/arch/x86/kernel/crash.c b/arch/x86/kernel/crash.c
21806index aceb2f9..c76d3e3 100644
21807--- a/arch/x86/kernel/crash.c
21808+++ b/arch/x86/kernel/crash.c
21809@@ -105,7 +105,7 @@ static void kdump_nmi_callback(int cpu, struct pt_regs *regs)
21810 #ifdef CONFIG_X86_32
21811 struct pt_regs fixed_regs;
21812
21813- if (!user_mode_vm(regs)) {
21814+ if (!user_mode(regs)) {
21815 crash_fixup_ss_esp(&fixed_regs, regs);
21816 regs = &fixed_regs;
21817 }
21818diff --git a/arch/x86/kernel/crash_dump_64.c b/arch/x86/kernel/crash_dump_64.c
21819index afa64ad..dce67dd 100644
21820--- a/arch/x86/kernel/crash_dump_64.c
21821+++ b/arch/x86/kernel/crash_dump_64.c
21822@@ -36,7 +36,7 @@ ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
21823 return -ENOMEM;
21824
21825 if (userbuf) {
21826- if (copy_to_user(buf, vaddr + offset, csize)) {
21827+ if (copy_to_user((char __force_user *)buf, vaddr + offset, csize)) {
21828 iounmap(vaddr);
21829 return -EFAULT;
21830 }
21831diff --git a/arch/x86/kernel/doublefault.c b/arch/x86/kernel/doublefault.c
21832index f6dfd93..892ade4 100644
21833--- a/arch/x86/kernel/doublefault.c
21834+++ b/arch/x86/kernel/doublefault.c
21835@@ -12,7 +12,7 @@
21836
21837 #define DOUBLEFAULT_STACKSIZE (1024)
21838 static unsigned long doublefault_stack[DOUBLEFAULT_STACKSIZE];
21839-#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE)
21840+#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE-2)
21841
21842 #define ptr_ok(x) ((x) > PAGE_OFFSET && (x) < PAGE_OFFSET + MAXMEM)
21843
21844@@ -22,7 +22,7 @@ static void doublefault_fn(void)
21845 unsigned long gdt, tss;
21846
21847 native_store_gdt(&gdt_desc);
21848- gdt = gdt_desc.address;
21849+ gdt = (unsigned long)gdt_desc.address;
21850
21851 printk(KERN_EMERG "PANIC: double fault, gdt at %08lx [%d bytes]\n", gdt, gdt_desc.size);
21852
21853@@ -59,10 +59,10 @@ struct tss_struct doublefault_tss __cacheline_aligned = {
21854 /* 0x2 bit is always set */
21855 .flags = X86_EFLAGS_SF | 0x2,
21856 .sp = STACK_START,
21857- .es = __USER_DS,
21858+ .es = __KERNEL_DS,
21859 .cs = __KERNEL_CS,
21860 .ss = __KERNEL_DS,
21861- .ds = __USER_DS,
21862+ .ds = __KERNEL_DS,
21863 .fs = __KERNEL_PERCPU,
21864
21865 .__cr3 = __pa_nodebug(swapper_pg_dir),
21866diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c
21867index cf3df1d..b637d9a 100644
21868--- a/arch/x86/kernel/dumpstack.c
21869+++ b/arch/x86/kernel/dumpstack.c
21870@@ -2,6 +2,9 @@
21871 * Copyright (C) 1991, 1992 Linus Torvalds
21872 * Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
21873 */
21874+#ifdef CONFIG_GRKERNSEC_HIDESYM
21875+#define __INCLUDED_BY_HIDESYM 1
21876+#endif
21877 #include <linux/kallsyms.h>
21878 #include <linux/kprobes.h>
21879 #include <linux/uaccess.h>
21880@@ -33,23 +36,21 @@ static void printk_stack_address(unsigned long address, int reliable)
21881
21882 void printk_address(unsigned long address)
21883 {
21884- pr_cont(" [<%p>] %pS\n", (void *)address, (void *)address);
21885+ pr_cont(" [<%p>] %pA\n", (void *)address, (void *)address);
21886 }
21887
21888 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
21889 static void
21890 print_ftrace_graph_addr(unsigned long addr, void *data,
21891 const struct stacktrace_ops *ops,
21892- struct thread_info *tinfo, int *graph)
21893+ struct task_struct *task, int *graph)
21894 {
21895- struct task_struct *task;
21896 unsigned long ret_addr;
21897 int index;
21898
21899 if (addr != (unsigned long)return_to_handler)
21900 return;
21901
21902- task = tinfo->task;
21903 index = task->curr_ret_stack;
21904
21905 if (!task->ret_stack || index < *graph)
21906@@ -66,7 +67,7 @@ print_ftrace_graph_addr(unsigned long addr, void *data,
21907 static inline void
21908 print_ftrace_graph_addr(unsigned long addr, void *data,
21909 const struct stacktrace_ops *ops,
21910- struct thread_info *tinfo, int *graph)
21911+ struct task_struct *task, int *graph)
21912 { }
21913 #endif
21914
21915@@ -77,10 +78,8 @@ print_ftrace_graph_addr(unsigned long addr, void *data,
21916 * severe exception (double fault, nmi, stack fault, debug, mce) hardware stack
21917 */
21918
21919-static inline int valid_stack_ptr(struct thread_info *tinfo,
21920- void *p, unsigned int size, void *end)
21921+static inline int valid_stack_ptr(void *t, void *p, unsigned int size, void *end)
21922 {
21923- void *t = tinfo;
21924 if (end) {
21925 if (p < end && p >= (end-THREAD_SIZE))
21926 return 1;
21927@@ -91,14 +90,14 @@ static inline int valid_stack_ptr(struct thread_info *tinfo,
21928 }
21929
21930 unsigned long
21931-print_context_stack(struct thread_info *tinfo,
21932+print_context_stack(struct task_struct *task, void *stack_start,
21933 unsigned long *stack, unsigned long bp,
21934 const struct stacktrace_ops *ops, void *data,
21935 unsigned long *end, int *graph)
21936 {
21937 struct stack_frame *frame = (struct stack_frame *)bp;
21938
21939- while (valid_stack_ptr(tinfo, stack, sizeof(*stack), end)) {
21940+ while (valid_stack_ptr(stack_start, stack, sizeof(*stack), end)) {
21941 unsigned long addr;
21942
21943 addr = *stack;
21944@@ -110,7 +109,7 @@ print_context_stack(struct thread_info *tinfo,
21945 } else {
21946 ops->address(data, addr, 0);
21947 }
21948- print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
21949+ print_ftrace_graph_addr(addr, data, ops, task, graph);
21950 }
21951 stack++;
21952 }
21953@@ -119,7 +118,7 @@ print_context_stack(struct thread_info *tinfo,
21954 EXPORT_SYMBOL_GPL(print_context_stack);
21955
21956 unsigned long
21957-print_context_stack_bp(struct thread_info *tinfo,
21958+print_context_stack_bp(struct task_struct *task, void *stack_start,
21959 unsigned long *stack, unsigned long bp,
21960 const struct stacktrace_ops *ops, void *data,
21961 unsigned long *end, int *graph)
21962@@ -127,7 +126,7 @@ print_context_stack_bp(struct thread_info *tinfo,
21963 struct stack_frame *frame = (struct stack_frame *)bp;
21964 unsigned long *ret_addr = &frame->return_address;
21965
21966- while (valid_stack_ptr(tinfo, ret_addr, sizeof(*ret_addr), end)) {
21967+ while (valid_stack_ptr(stack_start, ret_addr, sizeof(*ret_addr), end)) {
21968 unsigned long addr = *ret_addr;
21969
21970 if (!__kernel_text_address(addr))
21971@@ -136,7 +135,7 @@ print_context_stack_bp(struct thread_info *tinfo,
21972 ops->address(data, addr, 1);
21973 frame = frame->next_frame;
21974 ret_addr = &frame->return_address;
21975- print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
21976+ print_ftrace_graph_addr(addr, data, ops, task, graph);
21977 }
21978
21979 return (unsigned long)frame;
21980@@ -155,7 +154,7 @@ static int print_trace_stack(void *data, char *name)
21981 static void print_trace_address(void *data, unsigned long addr, int reliable)
21982 {
21983 touch_nmi_watchdog();
21984- printk(data);
21985+ printk("%s", (char *)data);
21986 printk_stack_address(addr, reliable);
21987 }
21988
21989@@ -225,6 +224,8 @@ unsigned long oops_begin(void)
21990 EXPORT_SYMBOL_GPL(oops_begin);
21991 NOKPROBE_SYMBOL(oops_begin);
21992
21993+extern void gr_handle_kernel_exploit(void);
21994+
21995 void oops_end(unsigned long flags, struct pt_regs *regs, int signr)
21996 {
21997 if (regs && kexec_should_crash(current))
21998@@ -246,7 +247,10 @@ void oops_end(unsigned long flags, struct pt_regs *regs, int signr)
21999 panic("Fatal exception in interrupt");
22000 if (panic_on_oops)
22001 panic("Fatal exception");
22002- do_exit(signr);
22003+
22004+ gr_handle_kernel_exploit();
22005+
22006+ do_group_exit(signr);
22007 }
22008 NOKPROBE_SYMBOL(oops_end);
22009
22010@@ -278,7 +282,7 @@ int __die(const char *str, struct pt_regs *regs, long err)
22011 print_modules();
22012 show_regs(regs);
22013 #ifdef CONFIG_X86_32
22014- if (user_mode_vm(regs)) {
22015+ if (user_mode(regs)) {
22016 sp = regs->sp;
22017 ss = regs->ss & 0xffff;
22018 } else {
22019@@ -307,7 +311,7 @@ void die(const char *str, struct pt_regs *regs, long err)
22020 unsigned long flags = oops_begin();
22021 int sig = SIGSEGV;
22022
22023- if (!user_mode_vm(regs))
22024+ if (!user_mode(regs))
22025 report_bug(regs->ip, regs);
22026
22027 if (__die(str, regs, err))
22028diff --git a/arch/x86/kernel/dumpstack_32.c b/arch/x86/kernel/dumpstack_32.c
22029index 5abd4cd..c65733b 100644
22030--- a/arch/x86/kernel/dumpstack_32.c
22031+++ b/arch/x86/kernel/dumpstack_32.c
22032@@ -61,15 +61,14 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
22033 bp = stack_frame(task, regs);
22034
22035 for (;;) {
22036- struct thread_info *context;
22037+ void *stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
22038 void *end_stack;
22039
22040 end_stack = is_hardirq_stack(stack, cpu);
22041 if (!end_stack)
22042 end_stack = is_softirq_stack(stack, cpu);
22043
22044- context = task_thread_info(task);
22045- bp = ops->walk_stack(context, stack, bp, ops, data,
22046+ bp = ops->walk_stack(task, stack_start, stack, bp, ops, data,
22047 end_stack, &graph);
22048
22049 /* Stop if not on irq stack */
22050@@ -123,27 +122,28 @@ void show_regs(struct pt_regs *regs)
22051 int i;
22052
22053 show_regs_print_info(KERN_EMERG);
22054- __show_regs(regs, !user_mode_vm(regs));
22055+ __show_regs(regs, !user_mode(regs));
22056
22057 /*
22058 * When in-kernel, we also print out the stack and code at the
22059 * time of the fault..
22060 */
22061- if (!user_mode_vm(regs)) {
22062+ if (!user_mode(regs)) {
22063 unsigned int code_prologue = code_bytes * 43 / 64;
22064 unsigned int code_len = code_bytes;
22065 unsigned char c;
22066 u8 *ip;
22067+ unsigned long cs_base = get_desc_base(&get_cpu_gdt_table(0)[(0xffff & regs->cs) >> 3]);
22068
22069 pr_emerg("Stack:\n");
22070 show_stack_log_lvl(NULL, regs, &regs->sp, 0, KERN_EMERG);
22071
22072 pr_emerg("Code:");
22073
22074- ip = (u8 *)regs->ip - code_prologue;
22075+ ip = (u8 *)regs->ip - code_prologue + cs_base;
22076 if (ip < (u8 *)PAGE_OFFSET || probe_kernel_address(ip, c)) {
22077 /* try starting at IP */
22078- ip = (u8 *)regs->ip;
22079+ ip = (u8 *)regs->ip + cs_base;
22080 code_len = code_len - code_prologue + 1;
22081 }
22082 for (i = 0; i < code_len; i++, ip++) {
22083@@ -152,7 +152,7 @@ void show_regs(struct pt_regs *regs)
22084 pr_cont(" Bad EIP value.");
22085 break;
22086 }
22087- if (ip == (u8 *)regs->ip)
22088+ if (ip == (u8 *)regs->ip + cs_base)
22089 pr_cont(" <%02x>", c);
22090 else
22091 pr_cont(" %02x", c);
22092@@ -165,6 +165,7 @@ int is_valid_bugaddr(unsigned long ip)
22093 {
22094 unsigned short ud2;
22095
22096+ ip = ktla_ktva(ip);
22097 if (ip < PAGE_OFFSET)
22098 return 0;
22099 if (probe_kernel_address((unsigned short *)ip, ud2))
22100@@ -172,3 +173,15 @@ int is_valid_bugaddr(unsigned long ip)
22101
22102 return ud2 == 0x0b0f;
22103 }
22104+
22105+#if defined(CONFIG_PAX_MEMORY_STACKLEAK) || defined(CONFIG_PAX_USERCOPY)
22106+void pax_check_alloca(unsigned long size)
22107+{
22108+ unsigned long sp = (unsigned long)&sp, stack_left;
22109+
22110+ /* all kernel stacks are of the same size */
22111+ stack_left = sp & (THREAD_SIZE - 1);
22112+ BUG_ON(stack_left < 256 || size >= stack_left - 256);
22113+}
22114+EXPORT_SYMBOL(pax_check_alloca);
22115+#endif
22116diff --git a/arch/x86/kernel/dumpstack_64.c b/arch/x86/kernel/dumpstack_64.c
22117index ff86f19..73eabf4 100644
22118--- a/arch/x86/kernel/dumpstack_64.c
22119+++ b/arch/x86/kernel/dumpstack_64.c
22120@@ -153,12 +153,12 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
22121 const struct stacktrace_ops *ops, void *data)
22122 {
22123 const unsigned cpu = get_cpu();
22124- struct thread_info *tinfo;
22125 unsigned long *irq_stack = (unsigned long *)per_cpu(irq_stack_ptr, cpu);
22126 unsigned long dummy;
22127 unsigned used = 0;
22128 int graph = 0;
22129 int done = 0;
22130+ void *stack_start;
22131
22132 if (!task)
22133 task = current;
22134@@ -179,7 +179,6 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
22135 * current stack address. If the stacks consist of nested
22136 * exceptions
22137 */
22138- tinfo = task_thread_info(task);
22139 while (!done) {
22140 unsigned long *stack_end;
22141 enum stack_type stype;
22142@@ -202,7 +201,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
22143 if (ops->stack(data, id) < 0)
22144 break;
22145
22146- bp = ops->walk_stack(tinfo, stack, bp, ops,
22147+ bp = ops->walk_stack(task, stack_end - EXCEPTION_STKSZ, stack, bp, ops,
22148 data, stack_end, &graph);
22149 ops->stack(data, "<EOE>");
22150 /*
22151@@ -210,6 +209,8 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
22152 * second-to-last pointer (index -2 to end) in the
22153 * exception stack:
22154 */
22155+ if ((u16)stack_end[-1] != __KERNEL_DS)
22156+ goto out;
22157 stack = (unsigned long *) stack_end[-2];
22158 done = 0;
22159 break;
22160@@ -218,7 +219,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
22161
22162 if (ops->stack(data, "IRQ") < 0)
22163 break;
22164- bp = ops->walk_stack(tinfo, stack, bp,
22165+ bp = ops->walk_stack(task, irq_stack, stack, bp,
22166 ops, data, stack_end, &graph);
22167 /*
22168 * We link to the next stack (which would be
22169@@ -240,7 +241,9 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
22170 /*
22171 * This handles the process stack:
22172 */
22173- bp = ops->walk_stack(tinfo, stack, bp, ops, data, NULL, &graph);
22174+ stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
22175+ bp = ops->walk_stack(task, stack_start, stack, bp, ops, data, NULL, &graph);
22176+out:
22177 put_cpu();
22178 }
22179 EXPORT_SYMBOL(dump_trace);
22180@@ -344,8 +347,55 @@ int is_valid_bugaddr(unsigned long ip)
22181 {
22182 unsigned short ud2;
22183
22184- if (__copy_from_user(&ud2, (const void __user *) ip, sizeof(ud2)))
22185+ if (probe_kernel_address((unsigned short *)ip, ud2))
22186 return 0;
22187
22188 return ud2 == 0x0b0f;
22189 }
22190+
22191+#if defined(CONFIG_PAX_MEMORY_STACKLEAK) || defined(CONFIG_PAX_USERCOPY)
22192+void pax_check_alloca(unsigned long size)
22193+{
22194+ unsigned long sp = (unsigned long)&sp, stack_start, stack_end;
22195+ unsigned cpu, used;
22196+ char *id;
22197+
22198+ /* check the process stack first */
22199+ stack_start = (unsigned long)task_stack_page(current);
22200+ stack_end = stack_start + THREAD_SIZE;
22201+ if (likely(stack_start <= sp && sp < stack_end)) {
22202+ unsigned long stack_left = sp & (THREAD_SIZE - 1);
22203+ BUG_ON(stack_left < 256 || size >= stack_left - 256);
22204+ return;
22205+ }
22206+
22207+ cpu = get_cpu();
22208+
22209+ /* check the irq stacks */
22210+ stack_end = (unsigned long)per_cpu(irq_stack_ptr, cpu);
22211+ stack_start = stack_end - IRQ_STACK_SIZE;
22212+ if (stack_start <= sp && sp < stack_end) {
22213+ unsigned long stack_left = sp & (IRQ_STACK_SIZE - 1);
22214+ put_cpu();
22215+ BUG_ON(stack_left < 256 || size >= stack_left - 256);
22216+ return;
22217+ }
22218+
22219+ /* check the exception stacks */
22220+ used = 0;
22221+ stack_end = (unsigned long)in_exception_stack(cpu, sp, &used, &id);
22222+ stack_start = stack_end - EXCEPTION_STKSZ;
22223+ if (stack_end && stack_start <= sp && sp < stack_end) {
22224+ unsigned long stack_left = sp & (EXCEPTION_STKSZ - 1);
22225+ put_cpu();
22226+ BUG_ON(stack_left < 256 || size >= stack_left - 256);
22227+ return;
22228+ }
22229+
22230+ put_cpu();
22231+
22232+ /* unknown stack */
22233+ BUG();
22234+}
22235+EXPORT_SYMBOL(pax_check_alloca);
22236+#endif
22237diff --git a/arch/x86/kernel/e820.c b/arch/x86/kernel/e820.c
22238index 46201de..ebffabf 100644
22239--- a/arch/x86/kernel/e820.c
22240+++ b/arch/x86/kernel/e820.c
22241@@ -794,8 +794,8 @@ unsigned long __init e820_end_of_low_ram_pfn(void)
22242
22243 static void early_panic(char *msg)
22244 {
22245- early_printk(msg);
22246- panic(msg);
22247+ early_printk("%s", msg);
22248+ panic("%s", msg);
22249 }
22250
22251 static int userdef __initdata;
22252diff --git a/arch/x86/kernel/early_printk.c b/arch/x86/kernel/early_printk.c
22253index a62536a..8444df4 100644
22254--- a/arch/x86/kernel/early_printk.c
22255+++ b/arch/x86/kernel/early_printk.c
22256@@ -7,6 +7,7 @@
22257 #include <linux/pci_regs.h>
22258 #include <linux/pci_ids.h>
22259 #include <linux/errno.h>
22260+#include <linux/sched.h>
22261 #include <asm/io.h>
22262 #include <asm/processor.h>
22263 #include <asm/fcntl.h>
22264diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S
22265index 31e2d5b..b31c76d 100644
22266--- a/arch/x86/kernel/entry_32.S
22267+++ b/arch/x86/kernel/entry_32.S
22268@@ -177,13 +177,154 @@
22269 /*CFI_REL_OFFSET gs, PT_GS*/
22270 .endm
22271 .macro SET_KERNEL_GS reg
22272+
22273+#ifdef CONFIG_CC_STACKPROTECTOR
22274 movl $(__KERNEL_STACK_CANARY), \reg
22275+#elif defined(CONFIG_PAX_MEMORY_UDEREF)
22276+ movl $(__USER_DS), \reg
22277+#else
22278+ xorl \reg, \reg
22279+#endif
22280+
22281 movl \reg, %gs
22282 .endm
22283
22284 #endif /* CONFIG_X86_32_LAZY_GS */
22285
22286-.macro SAVE_ALL
22287+.macro pax_enter_kernel
22288+#ifdef CONFIG_PAX_KERNEXEC
22289+ call pax_enter_kernel
22290+#endif
22291+.endm
22292+
22293+.macro pax_exit_kernel
22294+#ifdef CONFIG_PAX_KERNEXEC
22295+ call pax_exit_kernel
22296+#endif
22297+.endm
22298+
22299+#ifdef CONFIG_PAX_KERNEXEC
22300+ENTRY(pax_enter_kernel)
22301+#ifdef CONFIG_PARAVIRT
22302+ pushl %eax
22303+ pushl %ecx
22304+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0)
22305+ mov %eax, %esi
22306+#else
22307+ mov %cr0, %esi
22308+#endif
22309+ bts $16, %esi
22310+ jnc 1f
22311+ mov %cs, %esi
22312+ cmp $__KERNEL_CS, %esi
22313+ jz 3f
22314+ ljmp $__KERNEL_CS, $3f
22315+1: ljmp $__KERNEXEC_KERNEL_CS, $2f
22316+2:
22317+#ifdef CONFIG_PARAVIRT
22318+ mov %esi, %eax
22319+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
22320+#else
22321+ mov %esi, %cr0
22322+#endif
22323+3:
22324+#ifdef CONFIG_PARAVIRT
22325+ popl %ecx
22326+ popl %eax
22327+#endif
22328+ ret
22329+ENDPROC(pax_enter_kernel)
22330+
22331+ENTRY(pax_exit_kernel)
22332+#ifdef CONFIG_PARAVIRT
22333+ pushl %eax
22334+ pushl %ecx
22335+#endif
22336+ mov %cs, %esi
22337+ cmp $__KERNEXEC_KERNEL_CS, %esi
22338+ jnz 2f
22339+#ifdef CONFIG_PARAVIRT
22340+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0);
22341+ mov %eax, %esi
22342+#else
22343+ mov %cr0, %esi
22344+#endif
22345+ btr $16, %esi
22346+ ljmp $__KERNEL_CS, $1f
22347+1:
22348+#ifdef CONFIG_PARAVIRT
22349+ mov %esi, %eax
22350+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0);
22351+#else
22352+ mov %esi, %cr0
22353+#endif
22354+2:
22355+#ifdef CONFIG_PARAVIRT
22356+ popl %ecx
22357+ popl %eax
22358+#endif
22359+ ret
22360+ENDPROC(pax_exit_kernel)
22361+#endif
22362+
22363+ .macro pax_erase_kstack
22364+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
22365+ call pax_erase_kstack
22366+#endif
22367+ .endm
22368+
22369+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
22370+/*
22371+ * ebp: thread_info
22372+ */
22373+ENTRY(pax_erase_kstack)
22374+ pushl %edi
22375+ pushl %ecx
22376+ pushl %eax
22377+
22378+ mov TI_lowest_stack(%ebp), %edi
22379+ mov $-0xBEEF, %eax
22380+ std
22381+
22382+1: mov %edi, %ecx
22383+ and $THREAD_SIZE_asm - 1, %ecx
22384+ shr $2, %ecx
22385+ repne scasl
22386+ jecxz 2f
22387+
22388+ cmp $2*16, %ecx
22389+ jc 2f
22390+
22391+ mov $2*16, %ecx
22392+ repe scasl
22393+ jecxz 2f
22394+ jne 1b
22395+
22396+2: cld
22397+ or $2*4, %edi
22398+ mov %esp, %ecx
22399+ sub %edi, %ecx
22400+
22401+ cmp $THREAD_SIZE_asm, %ecx
22402+ jb 3f
22403+ ud2
22404+3:
22405+
22406+ shr $2, %ecx
22407+ rep stosl
22408+
22409+ mov TI_task_thread_sp0(%ebp), %edi
22410+ sub $128, %edi
22411+ mov %edi, TI_lowest_stack(%ebp)
22412+
22413+ popl %eax
22414+ popl %ecx
22415+ popl %edi
22416+ ret
22417+ENDPROC(pax_erase_kstack)
22418+#endif
22419+
22420+.macro __SAVE_ALL _DS
22421 cld
22422 PUSH_GS
22423 pushl_cfi %fs
22424@@ -206,7 +347,7 @@
22425 CFI_REL_OFFSET ecx, 0
22426 pushl_cfi %ebx
22427 CFI_REL_OFFSET ebx, 0
22428- movl $(__USER_DS), %edx
22429+ movl $\_DS, %edx
22430 movl %edx, %ds
22431 movl %edx, %es
22432 movl $(__KERNEL_PERCPU), %edx
22433@@ -214,6 +355,15 @@
22434 SET_KERNEL_GS %edx
22435 .endm
22436
22437+.macro SAVE_ALL
22438+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
22439+ __SAVE_ALL __KERNEL_DS
22440+ pax_enter_kernel
22441+#else
22442+ __SAVE_ALL __USER_DS
22443+#endif
22444+.endm
22445+
22446 .macro RESTORE_INT_REGS
22447 popl_cfi %ebx
22448 CFI_RESTORE ebx
22449@@ -297,7 +447,7 @@ ENTRY(ret_from_fork)
22450 popfl_cfi
22451 jmp syscall_exit
22452 CFI_ENDPROC
22453-END(ret_from_fork)
22454+ENDPROC(ret_from_fork)
22455
22456 ENTRY(ret_from_kernel_thread)
22457 CFI_STARTPROC
22458@@ -340,7 +490,15 @@ ret_from_intr:
22459 andl $SEGMENT_RPL_MASK, %eax
22460 #endif
22461 cmpl $USER_RPL, %eax
22462+
22463+#ifdef CONFIG_PAX_KERNEXEC
22464+ jae resume_userspace
22465+
22466+ pax_exit_kernel
22467+ jmp resume_kernel
22468+#else
22469 jb resume_kernel # not returning to v8086 or userspace
22470+#endif
22471
22472 ENTRY(resume_userspace)
22473 LOCKDEP_SYS_EXIT
22474@@ -352,8 +510,8 @@ ENTRY(resume_userspace)
22475 andl $_TIF_WORK_MASK, %ecx # is there any work to be done on
22476 # int/exception return?
22477 jne work_pending
22478- jmp restore_all
22479-END(ret_from_exception)
22480+ jmp restore_all_pax
22481+ENDPROC(ret_from_exception)
22482
22483 #ifdef CONFIG_PREEMPT
22484 ENTRY(resume_kernel)
22485@@ -365,7 +523,7 @@ need_resched:
22486 jz restore_all
22487 call preempt_schedule_irq
22488 jmp need_resched
22489-END(resume_kernel)
22490+ENDPROC(resume_kernel)
22491 #endif
22492 CFI_ENDPROC
22493
22494@@ -395,30 +553,45 @@ sysenter_past_esp:
22495 /*CFI_REL_OFFSET cs, 0*/
22496 /*
22497 * Push current_thread_info()->sysenter_return to the stack.
22498- * A tiny bit of offset fixup is necessary - 4*4 means the 4 words
22499- * pushed above; +8 corresponds to copy_thread's esp0 setting.
22500 */
22501- pushl_cfi ((TI_sysenter_return)-THREAD_SIZE+8+4*4)(%esp)
22502+ pushl_cfi $0
22503 CFI_REL_OFFSET eip, 0
22504
22505 pushl_cfi %eax
22506 SAVE_ALL
22507+ GET_THREAD_INFO(%ebp)
22508+ movl TI_sysenter_return(%ebp),%ebp
22509+ movl %ebp,PT_EIP(%esp)
22510 ENABLE_INTERRUPTS(CLBR_NONE)
22511
22512 /*
22513 * Load the potential sixth argument from user stack.
22514 * Careful about security.
22515 */
22516+ movl PT_OLDESP(%esp),%ebp
22517+
22518+#ifdef CONFIG_PAX_MEMORY_UDEREF
22519+ mov PT_OLDSS(%esp),%ds
22520+1: movl %ds:(%ebp),%ebp
22521+ push %ss
22522+ pop %ds
22523+#else
22524 cmpl $__PAGE_OFFSET-3,%ebp
22525 jae syscall_fault
22526 ASM_STAC
22527 1: movl (%ebp),%ebp
22528 ASM_CLAC
22529+#endif
22530+
22531 movl %ebp,PT_EBP(%esp)
22532 _ASM_EXTABLE(1b,syscall_fault)
22533
22534 GET_THREAD_INFO(%ebp)
22535
22536+#ifdef CONFIG_PAX_RANDKSTACK
22537+ pax_erase_kstack
22538+#endif
22539+
22540 testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%ebp)
22541 jnz sysenter_audit
22542 sysenter_do_call:
22543@@ -434,12 +607,24 @@ sysenter_after_call:
22544 testl $_TIF_ALLWORK_MASK, %ecx
22545 jne sysexit_audit
22546 sysenter_exit:
22547+
22548+#ifdef CONFIG_PAX_RANDKSTACK
22549+ pushl_cfi %eax
22550+ movl %esp, %eax
22551+ call pax_randomize_kstack
22552+ popl_cfi %eax
22553+#endif
22554+
22555+ pax_erase_kstack
22556+
22557 /* if something modifies registers it must also disable sysexit */
22558 movl PT_EIP(%esp), %edx
22559 movl PT_OLDESP(%esp), %ecx
22560 xorl %ebp,%ebp
22561 TRACE_IRQS_ON
22562 1: mov PT_FS(%esp), %fs
22563+2: mov PT_DS(%esp), %ds
22564+3: mov PT_ES(%esp), %es
22565 PTGS_TO_GS
22566 ENABLE_INTERRUPTS_SYSEXIT
22567
22568@@ -453,6 +638,9 @@ sysenter_audit:
22569 pushl_cfi PT_ESI(%esp) /* a3: 5th arg */
22570 pushl_cfi PT_EDX+4(%esp) /* a2: 4th arg */
22571 call __audit_syscall_entry
22572+
22573+ pax_erase_kstack
22574+
22575 popl_cfi %ecx /* get that remapped edx off the stack */
22576 popl_cfi %ecx /* get that remapped esi off the stack */
22577 movl PT_EAX(%esp),%eax /* reload syscall number */
22578@@ -479,10 +667,16 @@ sysexit_audit:
22579
22580 CFI_ENDPROC
22581 .pushsection .fixup,"ax"
22582-2: movl $0,PT_FS(%esp)
22583+4: movl $0,PT_FS(%esp)
22584+ jmp 1b
22585+5: movl $0,PT_DS(%esp)
22586+ jmp 1b
22587+6: movl $0,PT_ES(%esp)
22588 jmp 1b
22589 .popsection
22590- _ASM_EXTABLE(1b,2b)
22591+ _ASM_EXTABLE(1b,4b)
22592+ _ASM_EXTABLE(2b,5b)
22593+ _ASM_EXTABLE(3b,6b)
22594 PTGS_TO_GS_EX
22595 ENDPROC(ia32_sysenter_target)
22596
22597@@ -493,6 +687,11 @@ ENTRY(system_call)
22598 pushl_cfi %eax # save orig_eax
22599 SAVE_ALL
22600 GET_THREAD_INFO(%ebp)
22601+
22602+#ifdef CONFIG_PAX_RANDKSTACK
22603+ pax_erase_kstack
22604+#endif
22605+
22606 # system call tracing in operation / emulation
22607 testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%ebp)
22608 jnz syscall_trace_entry
22609@@ -512,6 +711,15 @@ syscall_exit:
22610 testl $_TIF_ALLWORK_MASK, %ecx # current->work
22611 jne syscall_exit_work
22612
22613+restore_all_pax:
22614+
22615+#ifdef CONFIG_PAX_RANDKSTACK
22616+ movl %esp, %eax
22617+ call pax_randomize_kstack
22618+#endif
22619+
22620+ pax_erase_kstack
22621+
22622 restore_all:
22623 TRACE_IRQS_IRET
22624 restore_all_notrace:
22625@@ -566,14 +774,34 @@ ldt_ss:
22626 * compensating for the offset by changing to the ESPFIX segment with
22627 * a base address that matches for the difference.
22628 */
22629-#define GDT_ESPFIX_SS PER_CPU_VAR(gdt_page) + (GDT_ENTRY_ESPFIX_SS * 8)
22630+#define GDT_ESPFIX_SS (GDT_ENTRY_ESPFIX_SS * 8)(%ebx)
22631 mov %esp, %edx /* load kernel esp */
22632 mov PT_OLDESP(%esp), %eax /* load userspace esp */
22633 mov %dx, %ax /* eax: new kernel esp */
22634 sub %eax, %edx /* offset (low word is 0) */
22635+#ifdef CONFIG_SMP
22636+ movl PER_CPU_VAR(cpu_number), %ebx
22637+ shll $PAGE_SHIFT_asm, %ebx
22638+ addl $cpu_gdt_table, %ebx
22639+#else
22640+ movl $cpu_gdt_table, %ebx
22641+#endif
22642 shr $16, %edx
22643- mov %dl, GDT_ESPFIX_SS + 4 /* bits 16..23 */
22644- mov %dh, GDT_ESPFIX_SS + 7 /* bits 24..31 */
22645+
22646+#ifdef CONFIG_PAX_KERNEXEC
22647+ mov %cr0, %esi
22648+ btr $16, %esi
22649+ mov %esi, %cr0
22650+#endif
22651+
22652+ mov %dl, 4 + GDT_ESPFIX_SS /* bits 16..23 */
22653+ mov %dh, 7 + GDT_ESPFIX_SS /* bits 24..31 */
22654+
22655+#ifdef CONFIG_PAX_KERNEXEC
22656+ bts $16, %esi
22657+ mov %esi, %cr0
22658+#endif
22659+
22660 pushl_cfi $__ESPFIX_SS
22661 pushl_cfi %eax /* new kernel esp */
22662 /* Disable interrupts, but do not irqtrace this section: we
22663@@ -603,20 +831,18 @@ work_resched:
22664 movl TI_flags(%ebp), %ecx
22665 andl $_TIF_WORK_MASK, %ecx # is there any work to be done other
22666 # than syscall tracing?
22667- jz restore_all
22668+ jz restore_all_pax
22669 testb $_TIF_NEED_RESCHED, %cl
22670 jnz work_resched
22671
22672 work_notifysig: # deal with pending signals and
22673 # notify-resume requests
22674+ movl %esp, %eax
22675 #ifdef CONFIG_VM86
22676 testl $X86_EFLAGS_VM, PT_EFLAGS(%esp)
22677- movl %esp, %eax
22678 jne work_notifysig_v86 # returning to kernel-space or
22679 # vm86-space
22680 1:
22681-#else
22682- movl %esp, %eax
22683 #endif
22684 TRACE_IRQS_ON
22685 ENABLE_INTERRUPTS(CLBR_NONE)
22686@@ -637,7 +863,7 @@ work_notifysig_v86:
22687 movl %eax, %esp
22688 jmp 1b
22689 #endif
22690-END(work_pending)
22691+ENDPROC(work_pending)
22692
22693 # perform syscall exit tracing
22694 ALIGN
22695@@ -645,11 +871,14 @@ syscall_trace_entry:
22696 movl $-ENOSYS,PT_EAX(%esp)
22697 movl %esp, %eax
22698 call syscall_trace_enter
22699+
22700+ pax_erase_kstack
22701+
22702 /* What it returned is what we'll actually use. */
22703 cmpl $(NR_syscalls), %eax
22704 jnae syscall_call
22705 jmp syscall_exit
22706-END(syscall_trace_entry)
22707+ENDPROC(syscall_trace_entry)
22708
22709 # perform syscall exit tracing
22710 ALIGN
22711@@ -662,26 +891,30 @@ syscall_exit_work:
22712 movl %esp, %eax
22713 call syscall_trace_leave
22714 jmp resume_userspace
22715-END(syscall_exit_work)
22716+ENDPROC(syscall_exit_work)
22717 CFI_ENDPROC
22718
22719 RING0_INT_FRAME # can't unwind into user space anyway
22720 syscall_fault:
22721+#ifdef CONFIG_PAX_MEMORY_UDEREF
22722+ push %ss
22723+ pop %ds
22724+#endif
22725 ASM_CLAC
22726 GET_THREAD_INFO(%ebp)
22727 movl $-EFAULT,PT_EAX(%esp)
22728 jmp resume_userspace
22729-END(syscall_fault)
22730+ENDPROC(syscall_fault)
22731
22732 syscall_badsys:
22733 movl $-ENOSYS,%eax
22734 jmp syscall_after_call
22735-END(syscall_badsys)
22736+ENDPROC(syscall_badsys)
22737
22738 sysenter_badsys:
22739 movl $-ENOSYS,%eax
22740 jmp sysenter_after_call
22741-END(sysenter_badsys)
22742+ENDPROC(sysenter_badsys)
22743 CFI_ENDPROC
22744
22745 .macro FIXUP_ESPFIX_STACK
22746@@ -694,8 +927,15 @@ END(sysenter_badsys)
22747 */
22748 #ifdef CONFIG_X86_ESPFIX32
22749 /* fixup the stack */
22750- mov GDT_ESPFIX_SS + 4, %al /* bits 16..23 */
22751- mov GDT_ESPFIX_SS + 7, %ah /* bits 24..31 */
22752+#ifdef CONFIG_SMP
22753+ movl PER_CPU_VAR(cpu_number), %ebx
22754+ shll $PAGE_SHIFT_asm, %ebx
22755+ addl $cpu_gdt_table, %ebx
22756+#else
22757+ movl $cpu_gdt_table, %ebx
22758+#endif
22759+ mov 4 + GDT_ESPFIX_SS, %al /* bits 16..23 */
22760+ mov 7 + GDT_ESPFIX_SS, %ah /* bits 24..31 */
22761 shl $16, %eax
22762 addl %esp, %eax /* the adjusted stack pointer */
22763 pushl_cfi $__KERNEL_DS
22764@@ -751,7 +991,7 @@ vector=vector+1
22765 .endr
22766 2: jmp common_interrupt
22767 .endr
22768-END(irq_entries_start)
22769+ENDPROC(irq_entries_start)
22770
22771 .previous
22772 END(interrupt)
22773@@ -808,7 +1048,7 @@ ENTRY(coprocessor_error)
22774 pushl_cfi $do_coprocessor_error
22775 jmp error_code
22776 CFI_ENDPROC
22777-END(coprocessor_error)
22778+ENDPROC(coprocessor_error)
22779
22780 ENTRY(simd_coprocessor_error)
22781 RING0_INT_FRAME
22782@@ -821,7 +1061,7 @@ ENTRY(simd_coprocessor_error)
22783 .section .altinstructions,"a"
22784 altinstruction_entry 661b, 663f, X86_FEATURE_XMM, 662b-661b, 664f-663f
22785 .previous
22786-.section .altinstr_replacement,"ax"
22787+.section .altinstr_replacement,"a"
22788 663: pushl $do_simd_coprocessor_error
22789 664:
22790 .previous
22791@@ -830,7 +1070,7 @@ ENTRY(simd_coprocessor_error)
22792 #endif
22793 jmp error_code
22794 CFI_ENDPROC
22795-END(simd_coprocessor_error)
22796+ENDPROC(simd_coprocessor_error)
22797
22798 ENTRY(device_not_available)
22799 RING0_INT_FRAME
22800@@ -839,18 +1079,18 @@ ENTRY(device_not_available)
22801 pushl_cfi $do_device_not_available
22802 jmp error_code
22803 CFI_ENDPROC
22804-END(device_not_available)
22805+ENDPROC(device_not_available)
22806
22807 #ifdef CONFIG_PARAVIRT
22808 ENTRY(native_iret)
22809 iret
22810 _ASM_EXTABLE(native_iret, iret_exc)
22811-END(native_iret)
22812+ENDPROC(native_iret)
22813
22814 ENTRY(native_irq_enable_sysexit)
22815 sti
22816 sysexit
22817-END(native_irq_enable_sysexit)
22818+ENDPROC(native_irq_enable_sysexit)
22819 #endif
22820
22821 ENTRY(overflow)
22822@@ -860,7 +1100,7 @@ ENTRY(overflow)
22823 pushl_cfi $do_overflow
22824 jmp error_code
22825 CFI_ENDPROC
22826-END(overflow)
22827+ENDPROC(overflow)
22828
22829 ENTRY(bounds)
22830 RING0_INT_FRAME
22831@@ -869,7 +1109,7 @@ ENTRY(bounds)
22832 pushl_cfi $do_bounds
22833 jmp error_code
22834 CFI_ENDPROC
22835-END(bounds)
22836+ENDPROC(bounds)
22837
22838 ENTRY(invalid_op)
22839 RING0_INT_FRAME
22840@@ -878,7 +1118,7 @@ ENTRY(invalid_op)
22841 pushl_cfi $do_invalid_op
22842 jmp error_code
22843 CFI_ENDPROC
22844-END(invalid_op)
22845+ENDPROC(invalid_op)
22846
22847 ENTRY(coprocessor_segment_overrun)
22848 RING0_INT_FRAME
22849@@ -887,7 +1127,7 @@ ENTRY(coprocessor_segment_overrun)
22850 pushl_cfi $do_coprocessor_segment_overrun
22851 jmp error_code
22852 CFI_ENDPROC
22853-END(coprocessor_segment_overrun)
22854+ENDPROC(coprocessor_segment_overrun)
22855
22856 ENTRY(invalid_TSS)
22857 RING0_EC_FRAME
22858@@ -895,7 +1135,7 @@ ENTRY(invalid_TSS)
22859 pushl_cfi $do_invalid_TSS
22860 jmp error_code
22861 CFI_ENDPROC
22862-END(invalid_TSS)
22863+ENDPROC(invalid_TSS)
22864
22865 ENTRY(segment_not_present)
22866 RING0_EC_FRAME
22867@@ -903,7 +1143,7 @@ ENTRY(segment_not_present)
22868 pushl_cfi $do_segment_not_present
22869 jmp error_code
22870 CFI_ENDPROC
22871-END(segment_not_present)
22872+ENDPROC(segment_not_present)
22873
22874 ENTRY(stack_segment)
22875 RING0_EC_FRAME
22876@@ -911,7 +1151,7 @@ ENTRY(stack_segment)
22877 pushl_cfi $do_stack_segment
22878 jmp error_code
22879 CFI_ENDPROC
22880-END(stack_segment)
22881+ENDPROC(stack_segment)
22882
22883 ENTRY(alignment_check)
22884 RING0_EC_FRAME
22885@@ -919,7 +1159,7 @@ ENTRY(alignment_check)
22886 pushl_cfi $do_alignment_check
22887 jmp error_code
22888 CFI_ENDPROC
22889-END(alignment_check)
22890+ENDPROC(alignment_check)
22891
22892 ENTRY(divide_error)
22893 RING0_INT_FRAME
22894@@ -928,7 +1168,7 @@ ENTRY(divide_error)
22895 pushl_cfi $do_divide_error
22896 jmp error_code
22897 CFI_ENDPROC
22898-END(divide_error)
22899+ENDPROC(divide_error)
22900
22901 #ifdef CONFIG_X86_MCE
22902 ENTRY(machine_check)
22903@@ -938,7 +1178,7 @@ ENTRY(machine_check)
22904 pushl_cfi machine_check_vector
22905 jmp error_code
22906 CFI_ENDPROC
22907-END(machine_check)
22908+ENDPROC(machine_check)
22909 #endif
22910
22911 ENTRY(spurious_interrupt_bug)
22912@@ -948,7 +1188,7 @@ ENTRY(spurious_interrupt_bug)
22913 pushl_cfi $do_spurious_interrupt_bug
22914 jmp error_code
22915 CFI_ENDPROC
22916-END(spurious_interrupt_bug)
22917+ENDPROC(spurious_interrupt_bug)
22918
22919 #ifdef CONFIG_XEN
22920 /* Xen doesn't set %esp to be precisely what the normal sysenter
22921@@ -1057,7 +1297,7 @@ BUILD_INTERRUPT3(hyperv_callback_vector, HYPERVISOR_CALLBACK_VECTOR,
22922
22923 ENTRY(mcount)
22924 ret
22925-END(mcount)
22926+ENDPROC(mcount)
22927
22928 ENTRY(ftrace_caller)
22929 pushl %eax
22930@@ -1087,7 +1327,7 @@ ftrace_graph_call:
22931 .globl ftrace_stub
22932 ftrace_stub:
22933 ret
22934-END(ftrace_caller)
22935+ENDPROC(ftrace_caller)
22936
22937 ENTRY(ftrace_regs_caller)
22938 pushf /* push flags before compare (in cs location) */
22939@@ -1185,7 +1425,7 @@ trace:
22940 popl %ecx
22941 popl %eax
22942 jmp ftrace_stub
22943-END(mcount)
22944+ENDPROC(mcount)
22945 #endif /* CONFIG_DYNAMIC_FTRACE */
22946 #endif /* CONFIG_FUNCTION_TRACER */
22947
22948@@ -1203,7 +1443,7 @@ ENTRY(ftrace_graph_caller)
22949 popl %ecx
22950 popl %eax
22951 ret
22952-END(ftrace_graph_caller)
22953+ENDPROC(ftrace_graph_caller)
22954
22955 .globl return_to_handler
22956 return_to_handler:
22957@@ -1264,15 +1504,18 @@ error_code:
22958 movl $-1, PT_ORIG_EAX(%esp) # no syscall to restart
22959 REG_TO_PTGS %ecx
22960 SET_KERNEL_GS %ecx
22961- movl $(__USER_DS), %ecx
22962+ movl $(__KERNEL_DS), %ecx
22963 movl %ecx, %ds
22964 movl %ecx, %es
22965+
22966+ pax_enter_kernel
22967+
22968 TRACE_IRQS_OFF
22969 movl %esp,%eax # pt_regs pointer
22970 call *%edi
22971 jmp ret_from_exception
22972 CFI_ENDPROC
22973-END(page_fault)
22974+ENDPROC(page_fault)
22975
22976 /*
22977 * Debug traps and NMI can happen at the one SYSENTER instruction
22978@@ -1315,7 +1558,7 @@ debug_stack_correct:
22979 call do_debug
22980 jmp ret_from_exception
22981 CFI_ENDPROC
22982-END(debug)
22983+ENDPROC(debug)
22984
22985 /*
22986 * NMI is doubly nasty. It can happen _while_ we're handling
22987@@ -1355,6 +1598,9 @@ nmi_stack_correct:
22988 xorl %edx,%edx # zero error code
22989 movl %esp,%eax # pt_regs pointer
22990 call do_nmi
22991+
22992+ pax_exit_kernel
22993+
22994 jmp restore_all_notrace
22995 CFI_ENDPROC
22996
22997@@ -1392,13 +1638,16 @@ nmi_espfix_stack:
22998 FIXUP_ESPFIX_STACK # %eax == %esp
22999 xorl %edx,%edx # zero error code
23000 call do_nmi
23001+
23002+ pax_exit_kernel
23003+
23004 RESTORE_REGS
23005 lss 12+4(%esp), %esp # back to espfix stack
23006 CFI_ADJUST_CFA_OFFSET -24
23007 jmp irq_return
23008 #endif
23009 CFI_ENDPROC
23010-END(nmi)
23011+ENDPROC(nmi)
23012
23013 ENTRY(int3)
23014 RING0_INT_FRAME
23015@@ -1411,14 +1660,14 @@ ENTRY(int3)
23016 call do_int3
23017 jmp ret_from_exception
23018 CFI_ENDPROC
23019-END(int3)
23020+ENDPROC(int3)
23021
23022 ENTRY(general_protection)
23023 RING0_EC_FRAME
23024 pushl_cfi $do_general_protection
23025 jmp error_code
23026 CFI_ENDPROC
23027-END(general_protection)
23028+ENDPROC(general_protection)
23029
23030 #ifdef CONFIG_KVM_GUEST
23031 ENTRY(async_page_fault)
23032@@ -1427,6 +1676,6 @@ ENTRY(async_page_fault)
23033 pushl_cfi $do_async_page_fault
23034 jmp error_code
23035 CFI_ENDPROC
23036-END(async_page_fault)
23037+ENDPROC(async_page_fault)
23038 #endif
23039
23040diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
23041index f0095a7..ec77893 100644
23042--- a/arch/x86/kernel/entry_64.S
23043+++ b/arch/x86/kernel/entry_64.S
23044@@ -59,6 +59,8 @@
23045 #include <asm/smap.h>
23046 #include <asm/pgtable_types.h>
23047 #include <linux/err.h>
23048+#include <asm/pgtable.h>
23049+#include <asm/alternative-asm.h>
23050
23051 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
23052 #include <linux/elf-em.h>
23053@@ -81,6 +83,431 @@ ENTRY(native_usergs_sysret64)
23054 ENDPROC(native_usergs_sysret64)
23055 #endif /* CONFIG_PARAVIRT */
23056
23057+ .macro ljmpq sel, off
23058+#if defined(CONFIG_MPSC) || defined(CONFIG_MCORE2) || defined (CONFIG_MATOM)
23059+ .byte 0x48; ljmp *1234f(%rip)
23060+ .pushsection .rodata
23061+ .align 16
23062+ 1234: .quad \off; .word \sel
23063+ .popsection
23064+#else
23065+ pushq $\sel
23066+ pushq $\off
23067+ lretq
23068+#endif
23069+ .endm
23070+
23071+ .macro pax_enter_kernel
23072+ pax_set_fptr_mask
23073+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
23074+ call pax_enter_kernel
23075+#endif
23076+ .endm
23077+
23078+ .macro pax_exit_kernel
23079+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
23080+ call pax_exit_kernel
23081+#endif
23082+
23083+ .endm
23084+
23085+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
23086+ENTRY(pax_enter_kernel)
23087+ pushq %rdi
23088+
23089+#ifdef CONFIG_PARAVIRT
23090+ PV_SAVE_REGS(CLBR_RDI)
23091+#endif
23092+
23093+#ifdef CONFIG_PAX_KERNEXEC
23094+ GET_CR0_INTO_RDI
23095+ bts $16,%rdi
23096+ jnc 3f
23097+ mov %cs,%edi
23098+ cmp $__KERNEL_CS,%edi
23099+ jnz 2f
23100+1:
23101+#endif
23102+
23103+#ifdef CONFIG_PAX_MEMORY_UDEREF
23104+ 661: jmp 111f
23105+ .pushsection .altinstr_replacement, "a"
23106+ 662: ASM_NOP2
23107+ .popsection
23108+ .pushsection .altinstructions, "a"
23109+ altinstruction_entry 661b, 662b, X86_FEATURE_PCID, 2, 2
23110+ .popsection
23111+ GET_CR3_INTO_RDI
23112+ cmp $0,%dil
23113+ jnz 112f
23114+ mov $__KERNEL_DS,%edi
23115+ mov %edi,%ss
23116+ jmp 111f
23117+112: cmp $1,%dil
23118+ jz 113f
23119+ ud2
23120+113: sub $4097,%rdi
23121+ bts $63,%rdi
23122+ SET_RDI_INTO_CR3
23123+ mov $__UDEREF_KERNEL_DS,%edi
23124+ mov %edi,%ss
23125+111:
23126+#endif
23127+
23128+#ifdef CONFIG_PARAVIRT
23129+ PV_RESTORE_REGS(CLBR_RDI)
23130+#endif
23131+
23132+ popq %rdi
23133+ pax_force_retaddr
23134+ retq
23135+
23136+#ifdef CONFIG_PAX_KERNEXEC
23137+2: ljmpq __KERNEL_CS,1b
23138+3: ljmpq __KERNEXEC_KERNEL_CS,4f
23139+4: SET_RDI_INTO_CR0
23140+ jmp 1b
23141+#endif
23142+ENDPROC(pax_enter_kernel)
23143+
23144+ENTRY(pax_exit_kernel)
23145+ pushq %rdi
23146+
23147+#ifdef CONFIG_PARAVIRT
23148+ PV_SAVE_REGS(CLBR_RDI)
23149+#endif
23150+
23151+#ifdef CONFIG_PAX_KERNEXEC
23152+ mov %cs,%rdi
23153+ cmp $__KERNEXEC_KERNEL_CS,%edi
23154+ jz 2f
23155+ GET_CR0_INTO_RDI
23156+ bts $16,%rdi
23157+ jnc 4f
23158+1:
23159+#endif
23160+
23161+#ifdef CONFIG_PAX_MEMORY_UDEREF
23162+ 661: jmp 111f
23163+ .pushsection .altinstr_replacement, "a"
23164+ 662: ASM_NOP2
23165+ .popsection
23166+ .pushsection .altinstructions, "a"
23167+ altinstruction_entry 661b, 662b, X86_FEATURE_PCID, 2, 2
23168+ .popsection
23169+ mov %ss,%edi
23170+ cmp $__UDEREF_KERNEL_DS,%edi
23171+ jnz 111f
23172+ GET_CR3_INTO_RDI
23173+ cmp $0,%dil
23174+ jz 112f
23175+ ud2
23176+112: add $4097,%rdi
23177+ bts $63,%rdi
23178+ SET_RDI_INTO_CR3
23179+ mov $__KERNEL_DS,%edi
23180+ mov %edi,%ss
23181+111:
23182+#endif
23183+
23184+#ifdef CONFIG_PARAVIRT
23185+ PV_RESTORE_REGS(CLBR_RDI);
23186+#endif
23187+
23188+ popq %rdi
23189+ pax_force_retaddr
23190+ retq
23191+
23192+#ifdef CONFIG_PAX_KERNEXEC
23193+2: GET_CR0_INTO_RDI
23194+ btr $16,%rdi
23195+ jnc 4f
23196+ ljmpq __KERNEL_CS,3f
23197+3: SET_RDI_INTO_CR0
23198+ jmp 1b
23199+4: ud2
23200+ jmp 4b
23201+#endif
23202+ENDPROC(pax_exit_kernel)
23203+#endif
23204+
23205+ .macro pax_enter_kernel_user
23206+ pax_set_fptr_mask
23207+#ifdef CONFIG_PAX_MEMORY_UDEREF
23208+ call pax_enter_kernel_user
23209+#endif
23210+ .endm
23211+
23212+ .macro pax_exit_kernel_user
23213+#ifdef CONFIG_PAX_MEMORY_UDEREF
23214+ call pax_exit_kernel_user
23215+#endif
23216+#ifdef CONFIG_PAX_RANDKSTACK
23217+ pushq %rax
23218+ pushq %r11
23219+ call pax_randomize_kstack
23220+ popq %r11
23221+ popq %rax
23222+#endif
23223+ .endm
23224+
23225+#ifdef CONFIG_PAX_MEMORY_UDEREF
23226+ENTRY(pax_enter_kernel_user)
23227+ pushq %rdi
23228+ pushq %rbx
23229+
23230+#ifdef CONFIG_PARAVIRT
23231+ PV_SAVE_REGS(CLBR_RDI)
23232+#endif
23233+
23234+ 661: jmp 111f
23235+ .pushsection .altinstr_replacement, "a"
23236+ 662: ASM_NOP2
23237+ .popsection
23238+ .pushsection .altinstructions, "a"
23239+ altinstruction_entry 661b, 662b, X86_FEATURE_PCID, 2, 2
23240+ .popsection
23241+ GET_CR3_INTO_RDI
23242+ cmp $1,%dil
23243+ jnz 4f
23244+ sub $4097,%rdi
23245+ bts $63,%rdi
23246+ SET_RDI_INTO_CR3
23247+ jmp 3f
23248+111:
23249+
23250+ GET_CR3_INTO_RDI
23251+ mov %rdi,%rbx
23252+ add $__START_KERNEL_map,%rbx
23253+ sub phys_base(%rip),%rbx
23254+
23255+#ifdef CONFIG_PARAVIRT
23256+ cmpl $0, pv_info+PARAVIRT_enabled
23257+ jz 1f
23258+ pushq %rdi
23259+ i = 0
23260+ .rept USER_PGD_PTRS
23261+ mov i*8(%rbx),%rsi
23262+ mov $0,%sil
23263+ lea i*8(%rbx),%rdi
23264+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd_batched)
23265+ i = i + 1
23266+ .endr
23267+ popq %rdi
23268+ jmp 2f
23269+1:
23270+#endif
23271+
23272+ i = 0
23273+ .rept USER_PGD_PTRS
23274+ movb $0,i*8(%rbx)
23275+ i = i + 1
23276+ .endr
23277+
23278+2: SET_RDI_INTO_CR3
23279+
23280+#ifdef CONFIG_PAX_KERNEXEC
23281+ GET_CR0_INTO_RDI
23282+ bts $16,%rdi
23283+ SET_RDI_INTO_CR0
23284+#endif
23285+
23286+3:
23287+
23288+#ifdef CONFIG_PARAVIRT
23289+ PV_RESTORE_REGS(CLBR_RDI)
23290+#endif
23291+
23292+ popq %rbx
23293+ popq %rdi
23294+ pax_force_retaddr
23295+ retq
23296+4: ud2
23297+ENDPROC(pax_enter_kernel_user)
23298+
23299+ENTRY(pax_exit_kernel_user)
23300+ pushq %rdi
23301+ pushq %rbx
23302+
23303+#ifdef CONFIG_PARAVIRT
23304+ PV_SAVE_REGS(CLBR_RDI)
23305+#endif
23306+
23307+ GET_CR3_INTO_RDI
23308+ 661: jmp 1f
23309+ .pushsection .altinstr_replacement, "a"
23310+ 662: ASM_NOP2
23311+ .popsection
23312+ .pushsection .altinstructions, "a"
23313+ altinstruction_entry 661b, 662b, X86_FEATURE_PCID, 2, 2
23314+ .popsection
23315+ cmp $0,%dil
23316+ jnz 3f
23317+ add $4097,%rdi
23318+ bts $63,%rdi
23319+ SET_RDI_INTO_CR3
23320+ jmp 2f
23321+1:
23322+
23323+ mov %rdi,%rbx
23324+
23325+#ifdef CONFIG_PAX_KERNEXEC
23326+ GET_CR0_INTO_RDI
23327+ btr $16,%rdi
23328+ jnc 3f
23329+ SET_RDI_INTO_CR0
23330+#endif
23331+
23332+ add $__START_KERNEL_map,%rbx
23333+ sub phys_base(%rip),%rbx
23334+
23335+#ifdef CONFIG_PARAVIRT
23336+ cmpl $0, pv_info+PARAVIRT_enabled
23337+ jz 1f
23338+ i = 0
23339+ .rept USER_PGD_PTRS
23340+ mov i*8(%rbx),%rsi
23341+ mov $0x67,%sil
23342+ lea i*8(%rbx),%rdi
23343+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd_batched)
23344+ i = i + 1
23345+ .endr
23346+ jmp 2f
23347+1:
23348+#endif
23349+
23350+ i = 0
23351+ .rept USER_PGD_PTRS
23352+ movb $0x67,i*8(%rbx)
23353+ i = i + 1
23354+ .endr
23355+2:
23356+
23357+#ifdef CONFIG_PARAVIRT
23358+ PV_RESTORE_REGS(CLBR_RDI)
23359+#endif
23360+
23361+ popq %rbx
23362+ popq %rdi
23363+ pax_force_retaddr
23364+ retq
23365+3: ud2
23366+ENDPROC(pax_exit_kernel_user)
23367+#endif
23368+
23369+ .macro pax_enter_kernel_nmi
23370+ pax_set_fptr_mask
23371+
23372+#ifdef CONFIG_PAX_KERNEXEC
23373+ GET_CR0_INTO_RDI
23374+ bts $16,%rdi
23375+ jc 110f
23376+ SET_RDI_INTO_CR0
23377+ or $2,%ebx
23378+110:
23379+#endif
23380+
23381+#ifdef CONFIG_PAX_MEMORY_UDEREF
23382+ 661: jmp 111f
23383+ .pushsection .altinstr_replacement, "a"
23384+ 662: ASM_NOP2
23385+ .popsection
23386+ .pushsection .altinstructions, "a"
23387+ altinstruction_entry 661b, 662b, X86_FEATURE_PCID, 2, 2
23388+ .popsection
23389+ GET_CR3_INTO_RDI
23390+ cmp $0,%dil
23391+ jz 111f
23392+ sub $4097,%rdi
23393+ or $4,%ebx
23394+ bts $63,%rdi
23395+ SET_RDI_INTO_CR3
23396+ mov $__UDEREF_KERNEL_DS,%edi
23397+ mov %edi,%ss
23398+111:
23399+#endif
23400+ .endm
23401+
23402+ .macro pax_exit_kernel_nmi
23403+#ifdef CONFIG_PAX_KERNEXEC
23404+ btr $1,%ebx
23405+ jnc 110f
23406+ GET_CR0_INTO_RDI
23407+ btr $16,%rdi
23408+ SET_RDI_INTO_CR0
23409+110:
23410+#endif
23411+
23412+#ifdef CONFIG_PAX_MEMORY_UDEREF
23413+ btr $2,%ebx
23414+ jnc 111f
23415+ GET_CR3_INTO_RDI
23416+ add $4097,%rdi
23417+ bts $63,%rdi
23418+ SET_RDI_INTO_CR3
23419+ mov $__KERNEL_DS,%edi
23420+ mov %edi,%ss
23421+111:
23422+#endif
23423+ .endm
23424+
23425+ .macro pax_erase_kstack
23426+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
23427+ call pax_erase_kstack
23428+#endif
23429+ .endm
23430+
23431+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
23432+ENTRY(pax_erase_kstack)
23433+ pushq %rdi
23434+ pushq %rcx
23435+ pushq %rax
23436+ pushq %r11
23437+
23438+ GET_THREAD_INFO(%r11)
23439+ mov TI_lowest_stack(%r11), %rdi
23440+ mov $-0xBEEF, %rax
23441+ std
23442+
23443+1: mov %edi, %ecx
23444+ and $THREAD_SIZE_asm - 1, %ecx
23445+ shr $3, %ecx
23446+ repne scasq
23447+ jecxz 2f
23448+
23449+ cmp $2*8, %ecx
23450+ jc 2f
23451+
23452+ mov $2*8, %ecx
23453+ repe scasq
23454+ jecxz 2f
23455+ jne 1b
23456+
23457+2: cld
23458+ or $2*8, %rdi
23459+ mov %esp, %ecx
23460+ sub %edi, %ecx
23461+
23462+ cmp $THREAD_SIZE_asm, %rcx
23463+ jb 3f
23464+ ud2
23465+3:
23466+
23467+ shr $3, %ecx
23468+ rep stosq
23469+
23470+ mov TI_task_thread_sp0(%r11), %rdi
23471+ sub $256, %rdi
23472+ mov %rdi, TI_lowest_stack(%r11)
23473+
23474+ popq %r11
23475+ popq %rax
23476+ popq %rcx
23477+ popq %rdi
23478+ pax_force_retaddr
23479+ ret
23480+ENDPROC(pax_erase_kstack)
23481+#endif
23482
23483 .macro TRACE_IRQS_IRETQ offset=ARGOFFSET
23484 #ifdef CONFIG_TRACE_IRQFLAGS
23485@@ -117,7 +544,7 @@ ENDPROC(native_usergs_sysret64)
23486 .endm
23487
23488 .macro TRACE_IRQS_IRETQ_DEBUG offset=ARGOFFSET
23489- bt $9,EFLAGS-\offset(%rsp) /* interrupts off? */
23490+ bt $X86_EFLAGS_IF_BIT,EFLAGS-\offset(%rsp) /* interrupts off? */
23491 jnc 1f
23492 TRACE_IRQS_ON_DEBUG
23493 1:
23494@@ -243,9 +670,52 @@ ENTRY(save_paranoid)
23495 js 1f /* negative -> in kernel */
23496 SWAPGS
23497 xorl %ebx,%ebx
23498-1: ret
23499+1:
23500+#ifdef CONFIG_PAX_MEMORY_UDEREF
23501+ testb $3, CS+8(%rsp)
23502+ jnz 1f
23503+ pax_enter_kernel
23504+ jmp 2f
23505+1: pax_enter_kernel_user
23506+2:
23507+#else
23508+ pax_enter_kernel
23509+#endif
23510+ pax_force_retaddr
23511+ ret
23512 CFI_ENDPROC
23513-END(save_paranoid)
23514+ENDPROC(save_paranoid)
23515+
23516+ENTRY(save_paranoid_nmi)
23517+ XCPT_FRAME 1 RDI+8
23518+ cld
23519+ movq_cfi rdi, RDI+8
23520+ movq_cfi rsi, RSI+8
23521+ movq_cfi rdx, RDX+8
23522+ movq_cfi rcx, RCX+8
23523+ movq_cfi rax, RAX+8
23524+ movq_cfi r8, R8+8
23525+ movq_cfi r9, R9+8
23526+ movq_cfi r10, R10+8
23527+ movq_cfi r11, R11+8
23528+ movq_cfi rbx, RBX+8
23529+ movq_cfi rbp, RBP+8
23530+ movq_cfi r12, R12+8
23531+ movq_cfi r13, R13+8
23532+ movq_cfi r14, R14+8
23533+ movq_cfi r15, R15+8
23534+ movl $1,%ebx
23535+ movl $MSR_GS_BASE,%ecx
23536+ rdmsr
23537+ testl %edx,%edx
23538+ js 1f /* negative -> in kernel */
23539+ SWAPGS
23540+ xorl %ebx,%ebx
23541+1: pax_enter_kernel_nmi
23542+ pax_force_retaddr
23543+ ret
23544+ CFI_ENDPROC
23545+ENDPROC(save_paranoid_nmi)
23546
23547 /*
23548 * A newly forked process directly context switches into this address.
23549@@ -266,7 +736,7 @@ ENTRY(ret_from_fork)
23550
23551 RESTORE_REST
23552
23553- testl $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
23554+ testb $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
23555 jz 1f
23556
23557 /*
23558@@ -279,15 +749,13 @@ ENTRY(ret_from_fork)
23559 jmp int_ret_from_sys_call
23560
23561 1:
23562- subq $REST_SKIP, %rsp # leave space for volatiles
23563- CFI_ADJUST_CFA_OFFSET REST_SKIP
23564 movq %rbp, %rdi
23565 call *%rbx
23566 movl $0, RAX(%rsp)
23567 RESTORE_REST
23568 jmp int_ret_from_sys_call
23569 CFI_ENDPROC
23570-END(ret_from_fork)
23571+ENDPROC(ret_from_fork)
23572
23573 /*
23574 * System call entry. Up to 6 arguments in registers are supported.
23575@@ -324,7 +792,7 @@ END(ret_from_fork)
23576 ENTRY(system_call)
23577 CFI_STARTPROC simple
23578 CFI_SIGNAL_FRAME
23579- CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
23580+ CFI_DEF_CFA rsp,0
23581 CFI_REGISTER rip,rcx
23582 /*CFI_REGISTER rflags,r11*/
23583 SWAPGS_UNSAFE_STACK
23584@@ -337,16 +805,23 @@ GLOBAL(system_call_after_swapgs)
23585
23586 movq %rsp,PER_CPU_VAR(old_rsp)
23587 movq PER_CPU_VAR(kernel_stack),%rsp
23588+ SAVE_ARGS 8*6, 0, rax_enosys=1
23589+ pax_enter_kernel_user
23590+
23591+#ifdef CONFIG_PAX_RANDKSTACK
23592+ pax_erase_kstack
23593+#endif
23594+
23595 /*
23596 * No need to follow this irqs off/on section - it's straight
23597 * and short:
23598 */
23599 ENABLE_INTERRUPTS(CLBR_NONE)
23600- SAVE_ARGS 8, 0, rax_enosys=1
23601 movq_cfi rax,(ORIG_RAX-ARGOFFSET)
23602 movq %rcx,RIP-ARGOFFSET(%rsp)
23603 CFI_REL_OFFSET rip,RIP-ARGOFFSET
23604- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
23605+ GET_THREAD_INFO(%rcx)
23606+ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%rcx)
23607 jnz tracesys
23608 system_call_fastpath:
23609 #if __SYSCALL_MASK == ~0
23610@@ -376,10 +851,13 @@ ret_from_sys_call:
23611 * flags (TIF_NOTIFY_RESUME, TIF_USER_RETURN_NOTIFY, etc) set is
23612 * very bad.
23613 */
23614- testl $_TIF_ALLWORK_MASK,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
23615+ GET_THREAD_INFO(%rcx)
23616+ testl $_TIF_ALLWORK_MASK,TI_flags(%rcx)
23617 jnz int_ret_from_sys_call_fixup /* Go the the slow path */
23618
23619 CFI_REMEMBER_STATE
23620+ pax_exit_kernel_user
23621+ pax_erase_kstack
23622 /*
23623 * sysretq will re-enable interrupts:
23624 */
23625@@ -399,12 +877,15 @@ int_ret_from_sys_call_fixup:
23626
23627 /* Do syscall tracing */
23628 tracesys:
23629- leaq -REST_SKIP(%rsp), %rdi
23630+ movq %rsp, %rdi
23631 movq $AUDIT_ARCH_X86_64, %rsi
23632 call syscall_trace_enter_phase1
23633 test %rax, %rax
23634 jnz tracesys_phase2 /* if needed, run the slow path */
23635- LOAD_ARGS 0 /* else restore clobbered regs */
23636+
23637+ pax_erase_kstack
23638+
23639+ LOAD_ARGS /* else restore clobbered regs */
23640 jmp system_call_fastpath /* and return to the fast path */
23641
23642 tracesys_phase2:
23643@@ -415,12 +896,14 @@ tracesys_phase2:
23644 movq %rax,%rdx
23645 call syscall_trace_enter_phase2
23646
23647+ pax_erase_kstack
23648+
23649 /*
23650 * Reload arg registers from stack in case ptrace changed them.
23651 * We don't reload %rax because syscall_trace_entry_phase2() returned
23652 * the value it wants us to use in the table lookup.
23653 */
23654- LOAD_ARGS ARGOFFSET, 1
23655+ LOAD_ARGS 1
23656 RESTORE_REST
23657 #if __SYSCALL_MASK == ~0
23658 cmpq $__NR_syscall_max,%rax
23659@@ -451,7 +934,9 @@ GLOBAL(int_with_check)
23660 andl %edi,%edx
23661 jnz int_careful
23662 andl $~TS_COMPAT,TI_status(%rcx)
23663- jmp retint_swapgs
23664+ pax_exit_kernel_user
23665+ pax_erase_kstack
23666+ jmp retint_swapgs_pax
23667
23668 /* Either reschedule or signal or syscall exit tracking needed. */
23669 /* First do a reschedule test. */
23670@@ -497,7 +982,7 @@ int_restore_rest:
23671 TRACE_IRQS_OFF
23672 jmp int_with_check
23673 CFI_ENDPROC
23674-END(system_call)
23675+ENDPROC(system_call)
23676
23677 .macro FORK_LIKE func
23678 ENTRY(stub_\func)
23679@@ -510,9 +995,10 @@ ENTRY(stub_\func)
23680 DEFAULT_FRAME 0 8 /* offset 8: return address */
23681 call sys_\func
23682 RESTORE_TOP_OF_STACK %r11, 8
23683- ret $REST_SKIP /* pop extended registers */
23684+ pax_force_retaddr
23685+ ret
23686 CFI_ENDPROC
23687-END(stub_\func)
23688+ENDPROC(stub_\func)
23689 .endm
23690
23691 .macro FIXED_FRAME label,func
23692@@ -522,9 +1008,10 @@ ENTRY(\label)
23693 FIXUP_TOP_OF_STACK %r11, 8-ARGOFFSET
23694 call \func
23695 RESTORE_TOP_OF_STACK %r11, 8-ARGOFFSET
23696+ pax_force_retaddr
23697 ret
23698 CFI_ENDPROC
23699-END(\label)
23700+ENDPROC(\label)
23701 .endm
23702
23703 FORK_LIKE clone
23704@@ -543,7 +1030,7 @@ ENTRY(stub_execve)
23705 RESTORE_REST
23706 jmp int_ret_from_sys_call
23707 CFI_ENDPROC
23708-END(stub_execve)
23709+ENDPROC(stub_execve)
23710
23711 ENTRY(stub_execveat)
23712 CFI_STARTPROC
23713@@ -557,7 +1044,7 @@ ENTRY(stub_execveat)
23714 RESTORE_REST
23715 jmp int_ret_from_sys_call
23716 CFI_ENDPROC
23717-END(stub_execveat)
23718+ENDPROC(stub_execveat)
23719
23720 /*
23721 * sigreturn is special because it needs to restore all registers on return.
23722@@ -574,7 +1061,7 @@ ENTRY(stub_rt_sigreturn)
23723 RESTORE_REST
23724 jmp int_ret_from_sys_call
23725 CFI_ENDPROC
23726-END(stub_rt_sigreturn)
23727+ENDPROC(stub_rt_sigreturn)
23728
23729 #ifdef CONFIG_X86_X32_ABI
23730 ENTRY(stub_x32_rt_sigreturn)
23731@@ -588,7 +1075,7 @@ ENTRY(stub_x32_rt_sigreturn)
23732 RESTORE_REST
23733 jmp int_ret_from_sys_call
23734 CFI_ENDPROC
23735-END(stub_x32_rt_sigreturn)
23736+ENDPROC(stub_x32_rt_sigreturn)
23737
23738 ENTRY(stub_x32_execve)
23739 CFI_STARTPROC
23740@@ -602,7 +1089,7 @@ ENTRY(stub_x32_execve)
23741 RESTORE_REST
23742 jmp int_ret_from_sys_call
23743 CFI_ENDPROC
23744-END(stub_x32_execve)
23745+ENDPROC(stub_x32_execve)
23746
23747 ENTRY(stub_x32_execveat)
23748 CFI_STARTPROC
23749@@ -616,7 +1103,7 @@ ENTRY(stub_x32_execveat)
23750 RESTORE_REST
23751 jmp int_ret_from_sys_call
23752 CFI_ENDPROC
23753-END(stub_x32_execveat)
23754+ENDPROC(stub_x32_execveat)
23755
23756 #endif
23757
23758@@ -653,7 +1140,7 @@ vector=vector+1
23759 2: jmp common_interrupt
23760 .endr
23761 CFI_ENDPROC
23762-END(irq_entries_start)
23763+ENDPROC(irq_entries_start)
23764
23765 .previous
23766 END(interrupt)
23767@@ -670,28 +1157,29 @@ END(interrupt)
23768 /* 0(%rsp): ~(interrupt number) */
23769 .macro interrupt func
23770 /* reserve pt_regs for scratch regs and rbp */
23771- subq $ORIG_RAX-RBP, %rsp
23772- CFI_ADJUST_CFA_OFFSET ORIG_RAX-RBP
23773+ subq $ORIG_RAX, %rsp
23774+ CFI_ADJUST_CFA_OFFSET ORIG_RAX
23775 cld
23776- /* start from rbp in pt_regs and jump over */
23777- movq_cfi rdi, (RDI-RBP)
23778- movq_cfi rsi, (RSI-RBP)
23779- movq_cfi rdx, (RDX-RBP)
23780- movq_cfi rcx, (RCX-RBP)
23781- movq_cfi rax, (RAX-RBP)
23782- movq_cfi r8, (R8-RBP)
23783- movq_cfi r9, (R9-RBP)
23784- movq_cfi r10, (R10-RBP)
23785- movq_cfi r11, (R11-RBP)
23786+ /* start from r15 in pt_regs and jump over */
23787+ movq_cfi rdi, RDI
23788+ movq_cfi rsi, RSI
23789+ movq_cfi rdx, RDX
23790+ movq_cfi rcx, RCX
23791+ movq_cfi rax, RAX
23792+ movq_cfi r8, R8
23793+ movq_cfi r9, R9
23794+ movq_cfi r10, R10
23795+ movq_cfi r11, R11
23796+ movq_cfi r12, R12
23797
23798 /* Save rbp so that we can unwind from get_irq_regs() */
23799- movq_cfi rbp, 0
23800+ movq_cfi rbp, RBP
23801
23802 /* Save previous stack value */
23803 movq %rsp, %rsi
23804
23805- leaq -RBP(%rsp),%rdi /* arg1 for handler */
23806- testl $3, CS-RBP(%rsi)
23807+ movq %rsp,%rdi /* arg1 for handler */
23808+ testb $3, CS(%rsi)
23809 je 1f
23810 SWAPGS
23811 /*
23812@@ -711,6 +1199,18 @@ END(interrupt)
23813 0x06 /* DW_OP_deref */, \
23814 0x08 /* DW_OP_const1u */, SS+8-RBP, \
23815 0x22 /* DW_OP_plus */
23816+
23817+#ifdef CONFIG_PAX_MEMORY_UDEREF
23818+ testb $3, CS(%rdi)
23819+ jnz 1f
23820+ pax_enter_kernel
23821+ jmp 2f
23822+1: pax_enter_kernel_user
23823+2:
23824+#else
23825+ pax_enter_kernel
23826+#endif
23827+
23828 /* We entered an interrupt context - irqs are off: */
23829 TRACE_IRQS_OFF
23830
23831@@ -735,14 +1235,14 @@ ret_from_intr:
23832
23833 /* Restore saved previous stack */
23834 popq %rsi
23835- CFI_DEF_CFA rsi,SS+8-RBP /* reg/off reset after def_cfa_expr */
23836- leaq ARGOFFSET-RBP(%rsi), %rsp
23837+ CFI_DEF_CFA rsi,SS+8 /* reg/off reset after def_cfa_expr */
23838+ movq %rsi, %rsp
23839 CFI_DEF_CFA_REGISTER rsp
23840- CFI_ADJUST_CFA_OFFSET RBP-ARGOFFSET
23841+ CFI_ADJUST_CFA_OFFSET -ARGOFFSET
23842
23843 exit_intr:
23844 GET_THREAD_INFO(%rcx)
23845- testl $3,CS-ARGOFFSET(%rsp)
23846+ testb $3,CS-ARGOFFSET(%rsp)
23847 je retint_kernel
23848
23849 /* Interrupt came from user space */
23850@@ -764,14 +1264,16 @@ retint_swapgs: /* return to user-space */
23851 * The iretq could re-enable interrupts:
23852 */
23853 DISABLE_INTERRUPTS(CLBR_ANY)
23854+ pax_exit_kernel_user
23855+retint_swapgs_pax:
23856 TRACE_IRQS_IRETQ
23857
23858 /*
23859 * Try to use SYSRET instead of IRET if we're returning to
23860 * a completely clean 64-bit userspace context.
23861 */
23862- movq (RCX-R11)(%rsp), %rcx
23863- cmpq %rcx,(RIP-R11)(%rsp) /* RCX == RIP */
23864+ movq (RCX-ARGOFFSET)(%rsp), %rcx
23865+ cmpq %rcx,(RIP-ARGOFFSET)(%rsp) /* RCX == RIP */
23866 jne opportunistic_sysret_failed
23867
23868 /*
23869@@ -792,7 +1294,7 @@ retint_swapgs: /* return to user-space */
23870 shr $__VIRTUAL_MASK_SHIFT, %rcx
23871 jnz opportunistic_sysret_failed
23872
23873- cmpq $__USER_CS,(CS-R11)(%rsp) /* CS must match SYSRET */
23874+ cmpq $__USER_CS,(CS-ARGOFFSET)(%rsp) /* CS must match SYSRET */
23875 jne opportunistic_sysret_failed
23876
23877 movq (R11-ARGOFFSET)(%rsp), %r11
23878@@ -838,6 +1340,27 @@ opportunistic_sysret_failed:
23879
23880 retint_restore_args: /* return to kernel space */
23881 DISABLE_INTERRUPTS(CLBR_ANY)
23882+ pax_exit_kernel
23883+
23884+#if defined(CONFIG_EFI) && defined(CONFIG_PAX_KERNEXEC)
23885+ /* This is a quirk to allow IRQs/NMIs/MCEs during early EFI setup,
23886+ * namely calling EFI runtime services with a phys mapping. We're
23887+ * starting off with NOPs and patch in the real instrumentation
23888+ * (BTS/OR) before starting any userland process; even before starting
23889+ * up the APs.
23890+ */
23891+ .pushsection .altinstr_replacement, "a"
23892+ 601: pax_force_retaddr (RIP-ARGOFFSET)
23893+ 602:
23894+ .popsection
23895+ 603: .fill 602b-601b, 1, 0x90
23896+ .pushsection .altinstructions, "a"
23897+ altinstruction_entry 603b, 601b, X86_FEATURE_ALWAYS, 602b-601b, 602b-601b
23898+ .popsection
23899+#else
23900+ pax_force_retaddr (RIP-ARGOFFSET)
23901+#endif
23902+
23903 /*
23904 * The iretq could re-enable interrupts:
23905 */
23906@@ -875,15 +1398,15 @@ native_irq_return_ldt:
23907 SWAPGS
23908 movq PER_CPU_VAR(espfix_waddr),%rdi
23909 movq %rax,(0*8)(%rdi) /* RAX */
23910- movq (2*8)(%rsp),%rax /* RIP */
23911+ movq (2*8 + RIP-RIP)(%rsp),%rax /* RIP */
23912 movq %rax,(1*8)(%rdi)
23913- movq (3*8)(%rsp),%rax /* CS */
23914+ movq (2*8 + CS-RIP)(%rsp),%rax /* CS */
23915 movq %rax,(2*8)(%rdi)
23916- movq (4*8)(%rsp),%rax /* RFLAGS */
23917+ movq (2*8 + EFLAGS-RIP)(%rsp),%rax /* RFLAGS */
23918 movq %rax,(3*8)(%rdi)
23919- movq (6*8)(%rsp),%rax /* SS */
23920+ movq (2*8 + SS-RIP)(%rsp),%rax /* SS */
23921 movq %rax,(5*8)(%rdi)
23922- movq (5*8)(%rsp),%rax /* RSP */
23923+ movq (2*8 + RSP-RIP)(%rsp),%rax /* RSP */
23924 movq %rax,(4*8)(%rdi)
23925 andl $0xffff0000,%eax
23926 popq_cfi %rdi
23927@@ -937,7 +1460,7 @@ ENTRY(retint_kernel)
23928 jmp exit_intr
23929 #endif
23930 CFI_ENDPROC
23931-END(common_interrupt)
23932+ENDPROC(common_interrupt)
23933
23934 /*
23935 * APIC interrupts.
23936@@ -951,7 +1474,7 @@ ENTRY(\sym)
23937 interrupt \do_sym
23938 jmp ret_from_intr
23939 CFI_ENDPROC
23940-END(\sym)
23941+ENDPROC(\sym)
23942 .endm
23943
23944 #ifdef CONFIG_TRACING
23945@@ -1024,7 +1547,7 @@ apicinterrupt IRQ_WORK_VECTOR \
23946 /*
23947 * Exception entry points.
23948 */
23949-#define INIT_TSS_IST(x) PER_CPU_VAR(init_tss) + (TSS_ist + ((x) - 1) * 8)
23950+#define INIT_TSS_IST(x) (TSS_ist + ((x) - 1) * 8)(%r13)
23951
23952 .macro idtentry sym do_sym has_error_code:req paranoid=0 shift_ist=-1
23953 ENTRY(\sym)
23954@@ -1080,6 +1603,12 @@ ENTRY(\sym)
23955 .endif
23956
23957 .if \shift_ist != -1
23958+#ifdef CONFIG_SMP
23959+ imul $TSS_size, PER_CPU_VAR(cpu_number), %r13d
23960+ lea init_tss(%r13), %r13
23961+#else
23962+ lea init_tss(%rip), %r13
23963+#endif
23964 subq $EXCEPTION_STKSZ, INIT_TSS_IST(\shift_ist)
23965 .endif
23966
23967@@ -1126,7 +1655,7 @@ ENTRY(\sym)
23968 .endif
23969
23970 CFI_ENDPROC
23971-END(\sym)
23972+ENDPROC(\sym)
23973 .endm
23974
23975 #ifdef CONFIG_TRACING
23976@@ -1167,9 +1696,10 @@ gs_change:
23977 2: mfence /* workaround */
23978 SWAPGS
23979 popfq_cfi
23980+ pax_force_retaddr
23981 ret
23982 CFI_ENDPROC
23983-END(native_load_gs_index)
23984+ENDPROC(native_load_gs_index)
23985
23986 _ASM_EXTABLE(gs_change,bad_gs)
23987 .section .fixup,"ax"
23988@@ -1197,9 +1727,10 @@ ENTRY(do_softirq_own_stack)
23989 CFI_DEF_CFA_REGISTER rsp
23990 CFI_ADJUST_CFA_OFFSET -8
23991 decl PER_CPU_VAR(irq_count)
23992+ pax_force_retaddr
23993 ret
23994 CFI_ENDPROC
23995-END(do_softirq_own_stack)
23996+ENDPROC(do_softirq_own_stack)
23997
23998 #ifdef CONFIG_XEN
23999 idtentry xen_hypervisor_callback xen_do_hypervisor_callback has_error_code=0
24000@@ -1240,7 +1771,7 @@ ENTRY(xen_do_hypervisor_callback) # do_hypervisor_callback(struct *pt_regs)
24001 #endif
24002 jmp error_exit
24003 CFI_ENDPROC
24004-END(xen_do_hypervisor_callback)
24005+ENDPROC(xen_do_hypervisor_callback)
24006
24007 /*
24008 * Hypervisor uses this for application faults while it executes.
24009@@ -1299,7 +1830,7 @@ ENTRY(xen_failsafe_callback)
24010 SAVE_ALL
24011 jmp error_exit
24012 CFI_ENDPROC
24013-END(xen_failsafe_callback)
24014+ENDPROC(xen_failsafe_callback)
24015
24016 apicinterrupt3 HYPERVISOR_CALLBACK_VECTOR \
24017 xen_hvm_callback_vector xen_evtchn_do_upcall
24018@@ -1344,18 +1875,25 @@ ENTRY(paranoid_exit)
24019 DEFAULT_FRAME
24020 DISABLE_INTERRUPTS(CLBR_NONE)
24021 TRACE_IRQS_OFF_DEBUG
24022- testl %ebx,%ebx /* swapgs needed? */
24023+ testl $1,%ebx /* swapgs needed? */
24024 jnz paranoid_restore
24025+#ifdef CONFIG_PAX_MEMORY_UDEREF
24026+ pax_exit_kernel_user
24027+#else
24028+ pax_exit_kernel
24029+#endif
24030 TRACE_IRQS_IRETQ 0
24031 SWAPGS_UNSAFE_STACK
24032 RESTORE_ALL 8
24033 INTERRUPT_RETURN
24034 paranoid_restore:
24035+ pax_exit_kernel
24036 TRACE_IRQS_IRETQ_DEBUG 0
24037 RESTORE_ALL 8
24038+ pax_force_retaddr_bts
24039 INTERRUPT_RETURN
24040 CFI_ENDPROC
24041-END(paranoid_exit)
24042+ENDPROC(paranoid_exit)
24043
24044 /*
24045 * Exception entry point. This expects an error code/orig_rax on the stack.
24046@@ -1382,12 +1920,23 @@ ENTRY(error_entry)
24047 movq %r14, R14+8(%rsp)
24048 movq %r15, R15+8(%rsp)
24049 xorl %ebx,%ebx
24050- testl $3,CS+8(%rsp)
24051+ testb $3,CS+8(%rsp)
24052 je error_kernelspace
24053 error_swapgs:
24054 SWAPGS
24055 error_sti:
24056+#ifdef CONFIG_PAX_MEMORY_UDEREF
24057+ testb $3, CS+8(%rsp)
24058+ jnz 1f
24059+ pax_enter_kernel
24060+ jmp 2f
24061+1: pax_enter_kernel_user
24062+2:
24063+#else
24064+ pax_enter_kernel
24065+#endif
24066 TRACE_IRQS_OFF
24067+ pax_force_retaddr
24068 ret
24069
24070 /*
24071@@ -1422,7 +1971,7 @@ error_bad_iret:
24072 decl %ebx /* Return to usergs */
24073 jmp error_sti
24074 CFI_ENDPROC
24075-END(error_entry)
24076+ENDPROC(error_entry)
24077
24078
24079 /* ebx: no swapgs flag (1: don't need swapgs, 0: need it) */
24080@@ -1433,7 +1982,7 @@ ENTRY(error_exit)
24081 DISABLE_INTERRUPTS(CLBR_NONE)
24082 TRACE_IRQS_OFF
24083 GET_THREAD_INFO(%rcx)
24084- testl %eax,%eax
24085+ testl $1,%eax
24086 jne retint_kernel
24087 LOCKDEP_SYS_EXIT_IRQ
24088 movl TI_flags(%rcx),%edx
24089@@ -1442,7 +1991,7 @@ ENTRY(error_exit)
24090 jnz retint_careful
24091 jmp retint_swapgs
24092 CFI_ENDPROC
24093-END(error_exit)
24094+ENDPROC(error_exit)
24095
24096 /*
24097 * Test if a given stack is an NMI stack or not.
24098@@ -1500,9 +2049,11 @@ ENTRY(nmi)
24099 * If %cs was not the kernel segment, then the NMI triggered in user
24100 * space, which means it is definitely not nested.
24101 */
24102+ cmpl $__KERNEXEC_KERNEL_CS, 16(%rsp)
24103+ je 1f
24104 cmpl $__KERNEL_CS, 16(%rsp)
24105 jne first_nmi
24106-
24107+1:
24108 /*
24109 * Check the special variable on the stack to see if NMIs are
24110 * executing.
24111@@ -1536,8 +2087,7 @@ nested_nmi:
24112
24113 1:
24114 /* Set up the interrupted NMIs stack to jump to repeat_nmi */
24115- leaq -1*8(%rsp), %rdx
24116- movq %rdx, %rsp
24117+ subq $8, %rsp
24118 CFI_ADJUST_CFA_OFFSET 1*8
24119 leaq -10*8(%rsp), %rdx
24120 pushq_cfi $__KERNEL_DS
24121@@ -1555,6 +2105,7 @@ nested_nmi_out:
24122 CFI_RESTORE rdx
24123
24124 /* No need to check faults here */
24125+# pax_force_retaddr_bts
24126 INTERRUPT_RETURN
24127
24128 CFI_RESTORE_STATE
24129@@ -1651,13 +2202,13 @@ end_repeat_nmi:
24130 subq $ORIG_RAX-R15, %rsp
24131 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
24132 /*
24133- * Use save_paranoid to handle SWAPGS, but no need to use paranoid_exit
24134+ * Use save_paranoid_nmi to handle SWAPGS, but no need to use paranoid_exit
24135 * as we should not be calling schedule in NMI context.
24136 * Even with normal interrupts enabled. An NMI should not be
24137 * setting NEED_RESCHED or anything that normal interrupts and
24138 * exceptions might do.
24139 */
24140- call save_paranoid
24141+ call save_paranoid_nmi
24142 DEFAULT_FRAME 0
24143
24144 /*
24145@@ -1667,9 +2218,9 @@ end_repeat_nmi:
24146 * NMI itself takes a page fault, the page fault that was preempted
24147 * will read the information from the NMI page fault and not the
24148 * origin fault. Save it off and restore it if it changes.
24149- * Use the r12 callee-saved register.
24150+ * Use the r13 callee-saved register.
24151 */
24152- movq %cr2, %r12
24153+ movq %cr2, %r13
24154
24155 /* paranoidentry do_nmi, 0; without TRACE_IRQS_OFF */
24156 movq %rsp,%rdi
24157@@ -1678,29 +2229,34 @@ end_repeat_nmi:
24158
24159 /* Did the NMI take a page fault? Restore cr2 if it did */
24160 movq %cr2, %rcx
24161- cmpq %rcx, %r12
24162+ cmpq %rcx, %r13
24163 je 1f
24164- movq %r12, %cr2
24165+ movq %r13, %cr2
24166 1:
24167
24168- testl %ebx,%ebx /* swapgs needed? */
24169+ testl $1,%ebx /* swapgs needed? */
24170 jnz nmi_restore
24171 nmi_swapgs:
24172 SWAPGS_UNSAFE_STACK
24173 nmi_restore:
24174+ pax_exit_kernel_nmi
24175 /* Pop the extra iret frame at once */
24176 RESTORE_ALL 6*8
24177+ testb $3, 8(%rsp)
24178+ jnz 1f
24179+ pax_force_retaddr_bts
24180+1:
24181
24182 /* Clear the NMI executing stack variable */
24183 movq $0, 5*8(%rsp)
24184 jmp irq_return
24185 CFI_ENDPROC
24186-END(nmi)
24187+ENDPROC(nmi)
24188
24189 ENTRY(ignore_sysret)
24190 CFI_STARTPROC
24191 mov $-ENOSYS,%eax
24192 sysret
24193 CFI_ENDPROC
24194-END(ignore_sysret)
24195+ENDPROC(ignore_sysret)
24196
24197diff --git a/arch/x86/kernel/espfix_64.c b/arch/x86/kernel/espfix_64.c
24198index f5d0730..5bce89c 100644
24199--- a/arch/x86/kernel/espfix_64.c
24200+++ b/arch/x86/kernel/espfix_64.c
24201@@ -70,8 +70,7 @@ static DEFINE_MUTEX(espfix_init_mutex);
24202 #define ESPFIX_MAX_PAGES DIV_ROUND_UP(CONFIG_NR_CPUS, ESPFIX_STACKS_PER_PAGE)
24203 static void *espfix_pages[ESPFIX_MAX_PAGES];
24204
24205-static __page_aligned_bss pud_t espfix_pud_page[PTRS_PER_PUD]
24206- __aligned(PAGE_SIZE);
24207+static pud_t espfix_pud_page[PTRS_PER_PUD] __page_aligned_rodata;
24208
24209 static unsigned int page_random, slot_random;
24210
24211@@ -122,11 +121,17 @@ static void init_espfix_random(void)
24212 void __init init_espfix_bsp(void)
24213 {
24214 pgd_t *pgd_p;
24215+ unsigned long index = pgd_index(ESPFIX_BASE_ADDR);
24216
24217 /* Install the espfix pud into the kernel page directory */
24218- pgd_p = &init_level4_pgt[pgd_index(ESPFIX_BASE_ADDR)];
24219+ pgd_p = &init_level4_pgt[index];
24220 pgd_populate(&init_mm, pgd_p, (pud_t *)espfix_pud_page);
24221
24222+#ifdef CONFIG_PAX_PER_CPU_PGD
24223+ clone_pgd_range(get_cpu_pgd(0, kernel) + index, swapper_pg_dir + index, 1);
24224+ clone_pgd_range(get_cpu_pgd(0, user) + index, swapper_pg_dir + index, 1);
24225+#endif
24226+
24227 /* Randomize the locations */
24228 init_espfix_random();
24229
24230@@ -194,7 +199,7 @@ void init_espfix_ap(void)
24231 set_pte(&pte_p[n*PTE_STRIDE], pte);
24232
24233 /* Job is done for this CPU and any CPU which shares this page */
24234- ACCESS_ONCE(espfix_pages[page]) = stack_page;
24235+ ACCESS_ONCE_RW(espfix_pages[page]) = stack_page;
24236
24237 unlock_done:
24238 mutex_unlock(&espfix_init_mutex);
24239diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
24240index 8b7b0a5..2395f29 100644
24241--- a/arch/x86/kernel/ftrace.c
24242+++ b/arch/x86/kernel/ftrace.c
24243@@ -89,7 +89,7 @@ static unsigned long text_ip_addr(unsigned long ip)
24244 * kernel identity mapping to modify code.
24245 */
24246 if (within(ip, (unsigned long)_text, (unsigned long)_etext))
24247- ip = (unsigned long)__va(__pa_symbol(ip));
24248+ ip = (unsigned long)__va(__pa_symbol(ktla_ktva(ip)));
24249
24250 return ip;
24251 }
24252@@ -105,6 +105,8 @@ ftrace_modify_code_direct(unsigned long ip, unsigned const char *old_code,
24253 {
24254 unsigned char replaced[MCOUNT_INSN_SIZE];
24255
24256+ ip = ktla_ktva(ip);
24257+
24258 /*
24259 * Note: Due to modules and __init, code can
24260 * disappear and change, we need to protect against faulting
24261@@ -230,7 +232,7 @@ static int update_ftrace_func(unsigned long ip, void *new)
24262 unsigned char old[MCOUNT_INSN_SIZE];
24263 int ret;
24264
24265- memcpy(old, (void *)ip, MCOUNT_INSN_SIZE);
24266+ memcpy(old, (void *)ktla_ktva(ip), MCOUNT_INSN_SIZE);
24267
24268 ftrace_update_func = ip;
24269 /* Make sure the breakpoints see the ftrace_update_func update */
24270@@ -311,7 +313,7 @@ static int add_break(unsigned long ip, const char *old)
24271 unsigned char replaced[MCOUNT_INSN_SIZE];
24272 unsigned char brk = BREAKPOINT_INSTRUCTION;
24273
24274- if (probe_kernel_read(replaced, (void *)ip, MCOUNT_INSN_SIZE))
24275+ if (probe_kernel_read(replaced, (void *)ktla_ktva(ip), MCOUNT_INSN_SIZE))
24276 return -EFAULT;
24277
24278 /* Make sure it is what we expect it to be */
24279diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c
24280index c4f8d46..2d63ae2 100644
24281--- a/arch/x86/kernel/head64.c
24282+++ b/arch/x86/kernel/head64.c
24283@@ -68,12 +68,12 @@ again:
24284 pgd = *pgd_p;
24285
24286 /*
24287- * The use of __START_KERNEL_map rather than __PAGE_OFFSET here is
24288- * critical -- __PAGE_OFFSET would point us back into the dynamic
24289+ * The use of __early_va rather than __va here is critical:
24290+ * __va would point us back into the dynamic
24291 * range and we might end up looping forever...
24292 */
24293 if (pgd)
24294- pud_p = (pudval_t *)((pgd & PTE_PFN_MASK) + __START_KERNEL_map - phys_base);
24295+ pud_p = (pudval_t *)(__early_va(pgd & PTE_PFN_MASK));
24296 else {
24297 if (next_early_pgt >= EARLY_DYNAMIC_PAGE_TABLES) {
24298 reset_early_page_tables();
24299@@ -83,13 +83,13 @@ again:
24300 pud_p = (pudval_t *)early_dynamic_pgts[next_early_pgt++];
24301 for (i = 0; i < PTRS_PER_PUD; i++)
24302 pud_p[i] = 0;
24303- *pgd_p = (pgdval_t)pud_p - __START_KERNEL_map + phys_base + _KERNPG_TABLE;
24304+ *pgd_p = (pgdval_t)__pa(pud_p) + _KERNPG_TABLE;
24305 }
24306 pud_p += pud_index(address);
24307 pud = *pud_p;
24308
24309 if (pud)
24310- pmd_p = (pmdval_t *)((pud & PTE_PFN_MASK) + __START_KERNEL_map - phys_base);
24311+ pmd_p = (pmdval_t *)(__early_va(pud & PTE_PFN_MASK));
24312 else {
24313 if (next_early_pgt >= EARLY_DYNAMIC_PAGE_TABLES) {
24314 reset_early_page_tables();
24315@@ -99,7 +99,7 @@ again:
24316 pmd_p = (pmdval_t *)early_dynamic_pgts[next_early_pgt++];
24317 for (i = 0; i < PTRS_PER_PMD; i++)
24318 pmd_p[i] = 0;
24319- *pud_p = (pudval_t)pmd_p - __START_KERNEL_map + phys_base + _KERNPG_TABLE;
24320+ *pud_p = (pudval_t)__pa(pmd_p) + _KERNPG_TABLE;
24321 }
24322 pmd = (physaddr & PMD_MASK) + early_pmd_flags;
24323 pmd_p[pmd_index(address)] = pmd;
24324@@ -180,7 +180,6 @@ asmlinkage __visible void __init x86_64_start_kernel(char * real_mode_data)
24325 if (console_loglevel >= CONSOLE_LOGLEVEL_DEBUG)
24326 early_printk("Kernel alive\n");
24327
24328- clear_page(init_level4_pgt);
24329 /* set init_level4_pgt kernel high mapping*/
24330 init_level4_pgt[511] = early_level4_pgt[511];
24331
24332diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S
24333index f36bd42..0ab4474 100644
24334--- a/arch/x86/kernel/head_32.S
24335+++ b/arch/x86/kernel/head_32.S
24336@@ -26,6 +26,12 @@
24337 /* Physical address */
24338 #define pa(X) ((X) - __PAGE_OFFSET)
24339
24340+#ifdef CONFIG_PAX_KERNEXEC
24341+#define ta(X) (X)
24342+#else
24343+#define ta(X) ((X) - __PAGE_OFFSET)
24344+#endif
24345+
24346 /*
24347 * References to members of the new_cpu_data structure.
24348 */
24349@@ -55,11 +61,7 @@
24350 * and small than max_low_pfn, otherwise will waste some page table entries
24351 */
24352
24353-#if PTRS_PER_PMD > 1
24354-#define PAGE_TABLE_SIZE(pages) (((pages) / PTRS_PER_PMD) + PTRS_PER_PGD)
24355-#else
24356-#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PGD)
24357-#endif
24358+#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PTE)
24359
24360 /* Number of possible pages in the lowmem region */
24361 LOWMEM_PAGES = (((1<<32) - __PAGE_OFFSET) >> PAGE_SHIFT)
24362@@ -78,6 +80,12 @@ INIT_MAP_SIZE = PAGE_TABLE_SIZE(KERNEL_PAGES) * PAGE_SIZE
24363 RESERVE_BRK(pagetables, INIT_MAP_SIZE)
24364
24365 /*
24366+ * Real beginning of normal "text" segment
24367+ */
24368+ENTRY(stext)
24369+ENTRY(_stext)
24370+
24371+/*
24372 * 32-bit kernel entrypoint; only used by the boot CPU. On entry,
24373 * %esi points to the real-mode code as a 32-bit pointer.
24374 * CS and DS must be 4 GB flat segments, but we don't depend on
24375@@ -85,6 +93,13 @@ RESERVE_BRK(pagetables, INIT_MAP_SIZE)
24376 * can.
24377 */
24378 __HEAD
24379+
24380+#ifdef CONFIG_PAX_KERNEXEC
24381+ jmp startup_32
24382+/* PaX: fill first page in .text with int3 to catch NULL derefs in kernel mode */
24383+.fill PAGE_SIZE-5,1,0xcc
24384+#endif
24385+
24386 ENTRY(startup_32)
24387 movl pa(stack_start),%ecx
24388
24389@@ -106,6 +121,59 @@ ENTRY(startup_32)
24390 2:
24391 leal -__PAGE_OFFSET(%ecx),%esp
24392
24393+#ifdef CONFIG_SMP
24394+ movl $pa(cpu_gdt_table),%edi
24395+ movl $__per_cpu_load,%eax
24396+ movw %ax,GDT_ENTRY_PERCPU * 8 + 2(%edi)
24397+ rorl $16,%eax
24398+ movb %al,GDT_ENTRY_PERCPU * 8 + 4(%edi)
24399+ movb %ah,GDT_ENTRY_PERCPU * 8 + 7(%edi)
24400+ movl $__per_cpu_end - 1,%eax
24401+ subl $__per_cpu_start,%eax
24402+ movw %ax,GDT_ENTRY_PERCPU * 8 + 0(%edi)
24403+#endif
24404+
24405+#ifdef CONFIG_PAX_MEMORY_UDEREF
24406+ movl $NR_CPUS,%ecx
24407+ movl $pa(cpu_gdt_table),%edi
24408+1:
24409+ movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c09700),GDT_ENTRY_KERNEL_DS * 8 + 4(%edi)
24410+ movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0fb00),GDT_ENTRY_DEFAULT_USER_CS * 8 + 4(%edi)
24411+ movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0f300),GDT_ENTRY_DEFAULT_USER_DS * 8 + 4(%edi)
24412+ addl $PAGE_SIZE_asm,%edi
24413+ loop 1b
24414+#endif
24415+
24416+#ifdef CONFIG_PAX_KERNEXEC
24417+ movl $pa(boot_gdt),%edi
24418+ movl $__LOAD_PHYSICAL_ADDR,%eax
24419+ movw %ax,GDT_ENTRY_BOOT_CS * 8 + 2(%edi)
24420+ rorl $16,%eax
24421+ movb %al,GDT_ENTRY_BOOT_CS * 8 + 4(%edi)
24422+ movb %ah,GDT_ENTRY_BOOT_CS * 8 + 7(%edi)
24423+ rorl $16,%eax
24424+
24425+ ljmp $(__BOOT_CS),$1f
24426+1:
24427+
24428+ movl $NR_CPUS,%ecx
24429+ movl $pa(cpu_gdt_table),%edi
24430+ addl $__PAGE_OFFSET,%eax
24431+1:
24432+ movb $0xc0,GDT_ENTRY_KERNEL_CS * 8 + 6(%edi)
24433+ movb $0xc0,GDT_ENTRY_KERNEXEC_KERNEL_CS * 8 + 6(%edi)
24434+ movw %ax,GDT_ENTRY_KERNEL_CS * 8 + 2(%edi)
24435+ movw %ax,GDT_ENTRY_KERNEXEC_KERNEL_CS * 8 + 2(%edi)
24436+ rorl $16,%eax
24437+ movb %al,GDT_ENTRY_KERNEL_CS * 8 + 4(%edi)
24438+ movb %al,GDT_ENTRY_KERNEXEC_KERNEL_CS * 8 + 4(%edi)
24439+ movb %ah,GDT_ENTRY_KERNEL_CS * 8 + 7(%edi)
24440+ movb %ah,GDT_ENTRY_KERNEXEC_KERNEL_CS * 8 + 7(%edi)
24441+ rorl $16,%eax
24442+ addl $PAGE_SIZE_asm,%edi
24443+ loop 1b
24444+#endif
24445+
24446 /*
24447 * Clear BSS first so that there are no surprises...
24448 */
24449@@ -201,8 +269,11 @@ ENTRY(startup_32)
24450 movl %eax, pa(max_pfn_mapped)
24451
24452 /* Do early initialization of the fixmap area */
24453- movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,%eax
24454- movl %eax,pa(initial_pg_pmd+0x1000*KPMDS-8)
24455+#ifdef CONFIG_COMPAT_VDSO
24456+ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(initial_pg_pmd+0x1000*KPMDS-8)
24457+#else
24458+ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,pa(initial_pg_pmd+0x1000*KPMDS-8)
24459+#endif
24460 #else /* Not PAE */
24461
24462 page_pde_offset = (__PAGE_OFFSET >> 20);
24463@@ -232,8 +303,11 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
24464 movl %eax, pa(max_pfn_mapped)
24465
24466 /* Do early initialization of the fixmap area */
24467- movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,%eax
24468- movl %eax,pa(initial_page_table+0xffc)
24469+#ifdef CONFIG_COMPAT_VDSO
24470+ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(initial_page_table+0xffc)
24471+#else
24472+ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,pa(initial_page_table+0xffc)
24473+#endif
24474 #endif
24475
24476 #ifdef CONFIG_PARAVIRT
24477@@ -247,9 +321,7 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
24478 cmpl $num_subarch_entries, %eax
24479 jae bad_subarch
24480
24481- movl pa(subarch_entries)(,%eax,4), %eax
24482- subl $__PAGE_OFFSET, %eax
24483- jmp *%eax
24484+ jmp *pa(subarch_entries)(,%eax,4)
24485
24486 bad_subarch:
24487 WEAK(lguest_entry)
24488@@ -261,10 +333,10 @@ WEAK(xen_entry)
24489 __INITDATA
24490
24491 subarch_entries:
24492- .long default_entry /* normal x86/PC */
24493- .long lguest_entry /* lguest hypervisor */
24494- .long xen_entry /* Xen hypervisor */
24495- .long default_entry /* Moorestown MID */
24496+ .long ta(default_entry) /* normal x86/PC */
24497+ .long ta(lguest_entry) /* lguest hypervisor */
24498+ .long ta(xen_entry) /* Xen hypervisor */
24499+ .long ta(default_entry) /* Moorestown MID */
24500 num_subarch_entries = (. - subarch_entries) / 4
24501 .previous
24502 #else
24503@@ -354,6 +426,7 @@ default_entry:
24504 movl pa(mmu_cr4_features),%eax
24505 movl %eax,%cr4
24506
24507+#ifdef CONFIG_X86_PAE
24508 testb $X86_CR4_PAE, %al # check if PAE is enabled
24509 jz enable_paging
24510
24511@@ -382,6 +455,9 @@ default_entry:
24512 /* Make changes effective */
24513 wrmsr
24514
24515+ btsl $_PAGE_BIT_NX-32,pa(__supported_pte_mask+4)
24516+#endif
24517+
24518 enable_paging:
24519
24520 /*
24521@@ -449,14 +525,20 @@ is486:
24522 1: movl $(__KERNEL_DS),%eax # reload all the segment registers
24523 movl %eax,%ss # after changing gdt.
24524
24525- movl $(__USER_DS),%eax # DS/ES contains default USER segment
24526+# movl $(__KERNEL_DS),%eax # DS/ES contains default KERNEL segment
24527 movl %eax,%ds
24528 movl %eax,%es
24529
24530 movl $(__KERNEL_PERCPU), %eax
24531 movl %eax,%fs # set this cpu's percpu
24532
24533+#ifdef CONFIG_CC_STACKPROTECTOR
24534 movl $(__KERNEL_STACK_CANARY),%eax
24535+#elif defined(CONFIG_PAX_MEMORY_UDEREF)
24536+ movl $(__USER_DS),%eax
24537+#else
24538+ xorl %eax,%eax
24539+#endif
24540 movl %eax,%gs
24541
24542 xorl %eax,%eax # Clear LDT
24543@@ -512,8 +594,11 @@ setup_once:
24544 * relocation. Manually set base address in stack canary
24545 * segment descriptor.
24546 */
24547- movl $gdt_page,%eax
24548+ movl $cpu_gdt_table,%eax
24549 movl $stack_canary,%ecx
24550+#ifdef CONFIG_SMP
24551+ addl $__per_cpu_load,%ecx
24552+#endif
24553 movw %cx, 8 * GDT_ENTRY_STACK_CANARY + 2(%eax)
24554 shrl $16, %ecx
24555 movb %cl, 8 * GDT_ENTRY_STACK_CANARY + 4(%eax)
24556@@ -548,7 +633,7 @@ ENTRY(early_idt_handler)
24557 cmpl $2,(%esp) # X86_TRAP_NMI
24558 je is_nmi # Ignore NMI
24559
24560- cmpl $2,%ss:early_recursion_flag
24561+ cmpl $1,%ss:early_recursion_flag
24562 je hlt_loop
24563 incl %ss:early_recursion_flag
24564
24565@@ -586,8 +671,8 @@ ENTRY(early_idt_handler)
24566 pushl (20+6*4)(%esp) /* trapno */
24567 pushl $fault_msg
24568 call printk
24569-#endif
24570 call dump_stack
24571+#endif
24572 hlt_loop:
24573 hlt
24574 jmp hlt_loop
24575@@ -607,8 +692,11 @@ ENDPROC(early_idt_handler)
24576 /* This is the default interrupt "handler" :-) */
24577 ALIGN
24578 ignore_int:
24579- cld
24580 #ifdef CONFIG_PRINTK
24581+ cmpl $2,%ss:early_recursion_flag
24582+ je hlt_loop
24583+ incl %ss:early_recursion_flag
24584+ cld
24585 pushl %eax
24586 pushl %ecx
24587 pushl %edx
24588@@ -617,9 +705,6 @@ ignore_int:
24589 movl $(__KERNEL_DS),%eax
24590 movl %eax,%ds
24591 movl %eax,%es
24592- cmpl $2,early_recursion_flag
24593- je hlt_loop
24594- incl early_recursion_flag
24595 pushl 16(%esp)
24596 pushl 24(%esp)
24597 pushl 32(%esp)
24598@@ -653,29 +738,34 @@ ENTRY(setup_once_ref)
24599 /*
24600 * BSS section
24601 */
24602-__PAGE_ALIGNED_BSS
24603- .align PAGE_SIZE
24604 #ifdef CONFIG_X86_PAE
24605+.section .initial_pg_pmd,"a",@progbits
24606 initial_pg_pmd:
24607 .fill 1024*KPMDS,4,0
24608 #else
24609+.section .initial_page_table,"a",@progbits
24610 ENTRY(initial_page_table)
24611 .fill 1024,4,0
24612 #endif
24613+.section .initial_pg_fixmap,"a",@progbits
24614 initial_pg_fixmap:
24615 .fill 1024,4,0
24616+.section .empty_zero_page,"a",@progbits
24617 ENTRY(empty_zero_page)
24618 .fill 4096,1,0
24619+.section .swapper_pg_dir,"a",@progbits
24620 ENTRY(swapper_pg_dir)
24621+#ifdef CONFIG_X86_PAE
24622+ .fill 4,8,0
24623+#else
24624 .fill 1024,4,0
24625+#endif
24626
24627 /*
24628 * This starts the data section.
24629 */
24630 #ifdef CONFIG_X86_PAE
24631-__PAGE_ALIGNED_DATA
24632- /* Page-aligned for the benefit of paravirt? */
24633- .align PAGE_SIZE
24634+.section .initial_page_table,"a",@progbits
24635 ENTRY(initial_page_table)
24636 .long pa(initial_pg_pmd+PGD_IDENT_ATTR),0 /* low identity map */
24637 # if KPMDS == 3
24638@@ -694,12 +784,20 @@ ENTRY(initial_page_table)
24639 # error "Kernel PMDs should be 1, 2 or 3"
24640 # endif
24641 .align PAGE_SIZE /* needs to be page-sized too */
24642+
24643+#ifdef CONFIG_PAX_PER_CPU_PGD
24644+ENTRY(cpu_pgd)
24645+ .rept 2*NR_CPUS
24646+ .fill 4,8,0
24647+ .endr
24648+#endif
24649+
24650 #endif
24651
24652 .data
24653 .balign 4
24654 ENTRY(stack_start)
24655- .long init_thread_union+THREAD_SIZE
24656+ .long init_thread_union+THREAD_SIZE-8
24657
24658 __INITRODATA
24659 int_msg:
24660@@ -727,7 +825,7 @@ fault_msg:
24661 * segment size, and 32-bit linear address value:
24662 */
24663
24664- .data
24665+.section .rodata,"a",@progbits
24666 .globl boot_gdt_descr
24667 .globl idt_descr
24668
24669@@ -736,7 +834,7 @@ fault_msg:
24670 .word 0 # 32 bit align gdt_desc.address
24671 boot_gdt_descr:
24672 .word __BOOT_DS+7
24673- .long boot_gdt - __PAGE_OFFSET
24674+ .long pa(boot_gdt)
24675
24676 .word 0 # 32-bit align idt_desc.address
24677 idt_descr:
24678@@ -747,7 +845,7 @@ idt_descr:
24679 .word 0 # 32 bit align gdt_desc.address
24680 ENTRY(early_gdt_descr)
24681 .word GDT_ENTRIES*8-1
24682- .long gdt_page /* Overwritten for secondary CPUs */
24683+ .long cpu_gdt_table /* Overwritten for secondary CPUs */
24684
24685 /*
24686 * The boot_gdt must mirror the equivalent in setup.S and is
24687@@ -756,5 +854,65 @@ ENTRY(early_gdt_descr)
24688 .align L1_CACHE_BYTES
24689 ENTRY(boot_gdt)
24690 .fill GDT_ENTRY_BOOT_CS,8,0
24691- .quad 0x00cf9a000000ffff /* kernel 4GB code at 0x00000000 */
24692- .quad 0x00cf92000000ffff /* kernel 4GB data at 0x00000000 */
24693+ .quad 0x00cf9b000000ffff /* kernel 4GB code at 0x00000000 */
24694+ .quad 0x00cf93000000ffff /* kernel 4GB data at 0x00000000 */
24695+
24696+ .align PAGE_SIZE_asm
24697+ENTRY(cpu_gdt_table)
24698+ .rept NR_CPUS
24699+ .quad 0x0000000000000000 /* NULL descriptor */
24700+ .quad 0x0000000000000000 /* 0x0b reserved */
24701+ .quad 0x0000000000000000 /* 0x13 reserved */
24702+ .quad 0x0000000000000000 /* 0x1b reserved */
24703+
24704+#ifdef CONFIG_PAX_KERNEXEC
24705+ .quad 0x00cf9b000000ffff /* 0x20 alternate kernel 4GB code at 0x00000000 */
24706+#else
24707+ .quad 0x0000000000000000 /* 0x20 unused */
24708+#endif
24709+
24710+ .quad 0x0000000000000000 /* 0x28 unused */
24711+ .quad 0x0000000000000000 /* 0x33 TLS entry 1 */
24712+ .quad 0x0000000000000000 /* 0x3b TLS entry 2 */
24713+ .quad 0x0000000000000000 /* 0x43 TLS entry 3 */
24714+ .quad 0x0000000000000000 /* 0x4b reserved */
24715+ .quad 0x0000000000000000 /* 0x53 reserved */
24716+ .quad 0x0000000000000000 /* 0x5b reserved */
24717+
24718+ .quad 0x00cf9b000000ffff /* 0x60 kernel 4GB code at 0x00000000 */
24719+ .quad 0x00cf93000000ffff /* 0x68 kernel 4GB data at 0x00000000 */
24720+ .quad 0x00cffb000000ffff /* 0x73 user 4GB code at 0x00000000 */
24721+ .quad 0x00cff3000000ffff /* 0x7b user 4GB data at 0x00000000 */
24722+
24723+ .quad 0x0000000000000000 /* 0x80 TSS descriptor */
24724+ .quad 0x0000000000000000 /* 0x88 LDT descriptor */
24725+
24726+ /*
24727+ * Segments used for calling PnP BIOS have byte granularity.
24728+ * The code segments and data segments have fixed 64k limits,
24729+ * the transfer segment sizes are set at run time.
24730+ */
24731+ .quad 0x00409b000000ffff /* 0x90 32-bit code */
24732+ .quad 0x00009b000000ffff /* 0x98 16-bit code */
24733+ .quad 0x000093000000ffff /* 0xa0 16-bit data */
24734+ .quad 0x0000930000000000 /* 0xa8 16-bit data */
24735+ .quad 0x0000930000000000 /* 0xb0 16-bit data */
24736+
24737+ /*
24738+ * The APM segments have byte granularity and their bases
24739+ * are set at run time. All have 64k limits.
24740+ */
24741+ .quad 0x00409b000000ffff /* 0xb8 APM CS code */
24742+ .quad 0x00009b000000ffff /* 0xc0 APM CS 16 code (16 bit) */
24743+ .quad 0x004093000000ffff /* 0xc8 APM DS data */
24744+
24745+ .quad 0x00c093000000ffff /* 0xd0 - ESPFIX SS */
24746+ .quad 0x0040930000000000 /* 0xd8 - PERCPU */
24747+ .quad 0x0040910000000017 /* 0xe0 - STACK_CANARY */
24748+ .quad 0x0000000000000000 /* 0xe8 - PCIBIOS_CS */
24749+ .quad 0x0000000000000000 /* 0xf0 - PCIBIOS_DS */
24750+ .quad 0x0000000000000000 /* 0xf8 - GDT entry 31: double-fault TSS */
24751+
24752+ /* Be sure this is zeroed to avoid false validations in Xen */
24753+ .fill PAGE_SIZE_asm - GDT_SIZE,1,0
24754+ .endr
24755diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
24756index 6fd514d9..320367e 100644
24757--- a/arch/x86/kernel/head_64.S
24758+++ b/arch/x86/kernel/head_64.S
24759@@ -20,6 +20,8 @@
24760 #include <asm/processor-flags.h>
24761 #include <asm/percpu.h>
24762 #include <asm/nops.h>
24763+#include <asm/cpufeature.h>
24764+#include <asm/alternative-asm.h>
24765
24766 #ifdef CONFIG_PARAVIRT
24767 #include <asm/asm-offsets.h>
24768@@ -41,6 +43,12 @@ L4_PAGE_OFFSET = pgd_index(__PAGE_OFFSET)
24769 L3_PAGE_OFFSET = pud_index(__PAGE_OFFSET)
24770 L4_START_KERNEL = pgd_index(__START_KERNEL_map)
24771 L3_START_KERNEL = pud_index(__START_KERNEL_map)
24772+L4_VMALLOC_START = pgd_index(VMALLOC_START)
24773+L3_VMALLOC_START = pud_index(VMALLOC_START)
24774+L4_VMALLOC_END = pgd_index(VMALLOC_END)
24775+L3_VMALLOC_END = pud_index(VMALLOC_END)
24776+L4_VMEMMAP_START = pgd_index(VMEMMAP_START)
24777+L3_VMEMMAP_START = pud_index(VMEMMAP_START)
24778
24779 .text
24780 __HEAD
24781@@ -89,11 +97,26 @@ startup_64:
24782 * Fixup the physical addresses in the page table
24783 */
24784 addq %rbp, early_level4_pgt + (L4_START_KERNEL*8)(%rip)
24785+ addq %rbp, init_level4_pgt + (L4_PAGE_OFFSET*8)(%rip)
24786+ addq %rbp, init_level4_pgt + (L4_VMALLOC_START*8)(%rip)
24787+ addq %rbp, init_level4_pgt + (L4_VMALLOC_END*8)(%rip)
24788+ addq %rbp, init_level4_pgt + (L4_VMEMMAP_START*8)(%rip)
24789+ addq %rbp, init_level4_pgt + (L4_START_KERNEL*8)(%rip)
24790
24791- addq %rbp, level3_kernel_pgt + (510*8)(%rip)
24792- addq %rbp, level3_kernel_pgt + (511*8)(%rip)
24793+ addq %rbp, level3_ident_pgt + (0*8)(%rip)
24794+#ifndef CONFIG_XEN
24795+ addq %rbp, level3_ident_pgt + (1*8)(%rip)
24796+#endif
24797
24798+ addq %rbp, level3_vmemmap_pgt + (L3_VMEMMAP_START*8)(%rip)
24799+
24800+ addq %rbp, level3_kernel_pgt + (L3_START_KERNEL*8)(%rip)
24801+ addq %rbp, level3_kernel_pgt + ((L3_START_KERNEL+1)*8)(%rip)
24802+
24803+ addq %rbp, level2_fixmap_pgt + (504*8)(%rip)
24804+ addq %rbp, level2_fixmap_pgt + (505*8)(%rip)
24805 addq %rbp, level2_fixmap_pgt + (506*8)(%rip)
24806+ addq %rbp, level2_fixmap_pgt + (507*8)(%rip)
24807
24808 /*
24809 * Set up the identity mapping for the switchover. These
24810@@ -174,11 +197,12 @@ ENTRY(secondary_startup_64)
24811 * after the boot processor executes this code.
24812 */
24813
24814+ orq $-1, %rbp
24815 movq $(init_level4_pgt - __START_KERNEL_map), %rax
24816 1:
24817
24818- /* Enable PAE mode and PGE */
24819- movl $(X86_CR4_PAE | X86_CR4_PGE), %ecx
24820+ /* Enable PAE mode and PSE/PGE */
24821+ movl $(X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE), %ecx
24822 movq %rcx, %cr4
24823
24824 /* Setup early boot stage 4 level pagetables. */
24825@@ -199,10 +223,21 @@ ENTRY(secondary_startup_64)
24826 movl $MSR_EFER, %ecx
24827 rdmsr
24828 btsl $_EFER_SCE, %eax /* Enable System Call */
24829- btl $20,%edi /* No Execute supported? */
24830+ btl $(X86_FEATURE_NX & 31),%edi /* No Execute supported? */
24831 jnc 1f
24832 btsl $_EFER_NX, %eax
24833+ cmpq $-1, %rbp
24834+ je 1f
24835 btsq $_PAGE_BIT_NX,early_pmd_flags(%rip)
24836+ btsq $_PAGE_BIT_NX, init_level4_pgt + 8*L4_PAGE_OFFSET(%rip)
24837+ btsq $_PAGE_BIT_NX, init_level4_pgt + 8*L4_VMALLOC_START(%rip)
24838+ btsq $_PAGE_BIT_NX, init_level4_pgt + 8*L4_VMALLOC_END(%rip)
24839+ btsq $_PAGE_BIT_NX, init_level4_pgt + 8*L4_VMEMMAP_START(%rip)
24840+ btsq $_PAGE_BIT_NX, level2_fixmap_pgt + 8*504(%rip)
24841+ btsq $_PAGE_BIT_NX, level2_fixmap_pgt + 8*505(%rip)
24842+ btsq $_PAGE_BIT_NX, level2_fixmap_pgt + 8*506(%rip)
24843+ btsq $_PAGE_BIT_NX, level2_fixmap_pgt + 8*507(%rip)
24844+ btsq $_PAGE_BIT_NX, __supported_pte_mask(%rip)
24845 1: wrmsr /* Make changes effective */
24846
24847 /* Setup cr0 */
24848@@ -282,6 +317,7 @@ ENTRY(secondary_startup_64)
24849 * REX.W + FF /5 JMP m16:64 Jump far, absolute indirect,
24850 * address given in m16:64.
24851 */
24852+ pax_set_fptr_mask
24853 movq initial_code(%rip),%rax
24854 pushq $0 # fake return address to stop unwinder
24855 pushq $__KERNEL_CS # set correct cs
24856@@ -313,7 +349,7 @@ ENDPROC(start_cpu0)
24857 .quad INIT_PER_CPU_VAR(irq_stack_union)
24858
24859 GLOBAL(stack_start)
24860- .quad init_thread_union+THREAD_SIZE-8
24861+ .quad init_thread_union+THREAD_SIZE-16
24862 .word 0
24863 __FINITDATA
24864
24865@@ -391,7 +427,7 @@ ENTRY(early_idt_handler)
24866 call dump_stack
24867 #ifdef CONFIG_KALLSYMS
24868 leaq early_idt_ripmsg(%rip),%rdi
24869- movq 40(%rsp),%rsi # %rip again
24870+ movq 88(%rsp),%rsi # %rip again
24871 call __print_symbol
24872 #endif
24873 #endif /* EARLY_PRINTK */
24874@@ -420,6 +456,7 @@ ENDPROC(early_idt_handler)
24875 early_recursion_flag:
24876 .long 0
24877
24878+ .section .rodata,"a",@progbits
24879 #ifdef CONFIG_EARLY_PRINTK
24880 early_idt_msg:
24881 .asciz "PANIC: early exception %02lx rip %lx:%lx error %lx cr2 %lx\n"
24882@@ -447,29 +484,52 @@ NEXT_PAGE(early_level4_pgt)
24883 NEXT_PAGE(early_dynamic_pgts)
24884 .fill 512*EARLY_DYNAMIC_PAGE_TABLES,8,0
24885
24886- .data
24887+ .section .rodata,"a",@progbits
24888
24889-#ifndef CONFIG_XEN
24890 NEXT_PAGE(init_level4_pgt)
24891- .fill 512,8,0
24892-#else
24893-NEXT_PAGE(init_level4_pgt)
24894- .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
24895 .org init_level4_pgt + L4_PAGE_OFFSET*8, 0
24896 .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
24897+ .org init_level4_pgt + L4_VMALLOC_START*8, 0
24898+ .quad level3_vmalloc_start_pgt - __START_KERNEL_map + _KERNPG_TABLE
24899+ .org init_level4_pgt + L4_VMALLOC_END*8, 0
24900+ .quad level3_vmalloc_end_pgt - __START_KERNEL_map + _KERNPG_TABLE
24901+ .org init_level4_pgt + L4_VMEMMAP_START*8, 0
24902+ .quad level3_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
24903 .org init_level4_pgt + L4_START_KERNEL*8, 0
24904 /* (2^48-(2*1024*1024*1024))/(2^39) = 511 */
24905 .quad level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE
24906
24907+#ifdef CONFIG_PAX_PER_CPU_PGD
24908+NEXT_PAGE(cpu_pgd)
24909+ .rept 2*NR_CPUS
24910+ .fill 512,8,0
24911+ .endr
24912+#endif
24913+
24914 NEXT_PAGE(level3_ident_pgt)
24915 .quad level2_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
24916+#ifdef CONFIG_XEN
24917 .fill 511, 8, 0
24918+#else
24919+ .quad level2_ident_pgt + PAGE_SIZE - __START_KERNEL_map + _KERNPG_TABLE
24920+ .fill 510,8,0
24921+#endif
24922+
24923+NEXT_PAGE(level3_vmalloc_start_pgt)
24924+ .fill 512,8,0
24925+
24926+NEXT_PAGE(level3_vmalloc_end_pgt)
24927+ .fill 512,8,0
24928+
24929+NEXT_PAGE(level3_vmemmap_pgt)
24930+ .fill L3_VMEMMAP_START,8,0
24931+ .quad level2_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
24932+
24933 NEXT_PAGE(level2_ident_pgt)
24934- /* Since I easily can, map the first 1G.
24935+ /* Since I easily can, map the first 2G.
24936 * Don't set NX because code runs from these pages.
24937 */
24938- PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, PTRS_PER_PMD)
24939-#endif
24940+ PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, 2*PTRS_PER_PMD)
24941
24942 NEXT_PAGE(level3_kernel_pgt)
24943 .fill L3_START_KERNEL,8,0
24944@@ -477,6 +537,9 @@ NEXT_PAGE(level3_kernel_pgt)
24945 .quad level2_kernel_pgt - __START_KERNEL_map + _KERNPG_TABLE
24946 .quad level2_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
24947
24948+NEXT_PAGE(level2_vmemmap_pgt)
24949+ .fill 512,8,0
24950+
24951 NEXT_PAGE(level2_kernel_pgt)
24952 /*
24953 * 512 MB kernel mapping. We spend a full page on this pagetable
24954@@ -492,23 +555,61 @@ NEXT_PAGE(level2_kernel_pgt)
24955 KERNEL_IMAGE_SIZE/PMD_SIZE)
24956
24957 NEXT_PAGE(level2_fixmap_pgt)
24958- .fill 506,8,0
24959- .quad level1_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
24960- /* 8MB reserved for vsyscalls + a 2MB hole = 4 + 1 entries */
24961- .fill 5,8,0
24962+ .fill 504,8,0
24963+ .quad level1_fixmap_pgt - __START_KERNEL_map + 0 * PAGE_SIZE + _PAGE_TABLE
24964+ .quad level1_fixmap_pgt - __START_KERNEL_map + 1 * PAGE_SIZE + _PAGE_TABLE
24965+ .quad level1_fixmap_pgt - __START_KERNEL_map + 2 * PAGE_SIZE + _PAGE_TABLE
24966+ .quad level1_vsyscall_pgt - __START_KERNEL_map + _PAGE_TABLE
24967+ /* 6MB reserved for vsyscalls + a 2MB hole = 3 + 1 entries */
24968+ .fill 4,8,0
24969
24970 NEXT_PAGE(level1_fixmap_pgt)
24971+ .fill 3*512,8,0
24972+
24973+NEXT_PAGE(level1_vsyscall_pgt)
24974 .fill 512,8,0
24975
24976 #undef PMDS
24977
24978- .data
24979+ .align PAGE_SIZE
24980+ENTRY(cpu_gdt_table)
24981+ .rept NR_CPUS
24982+ .quad 0x0000000000000000 /* NULL descriptor */
24983+ .quad 0x00cf9b000000ffff /* __KERNEL32_CS */
24984+ .quad 0x00af9b000000ffff /* __KERNEL_CS */
24985+ .quad 0x00cf93000000ffff /* __KERNEL_DS */
24986+ .quad 0x00cffb000000ffff /* __USER32_CS */
24987+ .quad 0x00cff3000000ffff /* __USER_DS, __USER32_DS */
24988+ .quad 0x00affb000000ffff /* __USER_CS */
24989+
24990+#ifdef CONFIG_PAX_KERNEXEC
24991+ .quad 0x00af9b000000ffff /* __KERNEXEC_KERNEL_CS */
24992+#else
24993+ .quad 0x0 /* unused */
24994+#endif
24995+
24996+ .quad 0,0 /* TSS */
24997+ .quad 0,0 /* LDT */
24998+ .quad 0,0,0 /* three TLS descriptors */
24999+ .quad 0x0000f40000000000 /* node/CPU stored in limit */
25000+ /* asm/segment.h:GDT_ENTRIES must match this */
25001+
25002+#ifdef CONFIG_PAX_MEMORY_UDEREF
25003+ .quad 0x00cf93000000ffff /* __UDEREF_KERNEL_DS */
25004+#else
25005+ .quad 0x0 /* unused */
25006+#endif
25007+
25008+ /* zero the remaining page */
25009+ .fill PAGE_SIZE / 8 - GDT_ENTRIES,8,0
25010+ .endr
25011+
25012 .align 16
25013 .globl early_gdt_descr
25014 early_gdt_descr:
25015 .word GDT_ENTRIES*8-1
25016 early_gdt_descr_base:
25017- .quad INIT_PER_CPU_VAR(gdt_page)
25018+ .quad cpu_gdt_table
25019
25020 ENTRY(phys_base)
25021 /* This must match the first entry in level2_kernel_pgt */
25022@@ -532,8 +633,8 @@ NEXT_PAGE(kasan_zero_pud)
25023
25024
25025 #include "../../x86/xen/xen-head.S"
25026-
25027- __PAGE_ALIGNED_BSS
25028+
25029+ .section .rodata,"a",@progbits
25030 NEXT_PAGE(empty_zero_page)
25031 .skip PAGE_SIZE
25032
25033diff --git a/arch/x86/kernel/i386_ksyms_32.c b/arch/x86/kernel/i386_ksyms_32.c
25034index 05fd74f..c3548b1 100644
25035--- a/arch/x86/kernel/i386_ksyms_32.c
25036+++ b/arch/x86/kernel/i386_ksyms_32.c
25037@@ -20,8 +20,12 @@ extern void cmpxchg8b_emu(void);
25038 EXPORT_SYMBOL(cmpxchg8b_emu);
25039 #endif
25040
25041+EXPORT_SYMBOL_GPL(cpu_gdt_table);
25042+
25043 /* Networking helper routines. */
25044 EXPORT_SYMBOL(csum_partial_copy_generic);
25045+EXPORT_SYMBOL(csum_partial_copy_generic_to_user);
25046+EXPORT_SYMBOL(csum_partial_copy_generic_from_user);
25047
25048 EXPORT_SYMBOL(__get_user_1);
25049 EXPORT_SYMBOL(__get_user_2);
25050@@ -44,3 +48,11 @@ EXPORT_SYMBOL(___preempt_schedule);
25051 EXPORT_SYMBOL(___preempt_schedule_context);
25052 #endif
25053 #endif
25054+
25055+#ifdef CONFIG_PAX_KERNEXEC
25056+EXPORT_SYMBOL(__LOAD_PHYSICAL_ADDR);
25057+#endif
25058+
25059+#ifdef CONFIG_PAX_PER_CPU_PGD
25060+EXPORT_SYMBOL(cpu_pgd);
25061+#endif
25062diff --git a/arch/x86/kernel/i387.c b/arch/x86/kernel/i387.c
25063index d5651fc..29c740d 100644
25064--- a/arch/x86/kernel/i387.c
25065+++ b/arch/x86/kernel/i387.c
25066@@ -68,7 +68,7 @@ static inline bool interrupted_kernel_fpu_idle(void)
25067 static inline bool interrupted_user_mode(void)
25068 {
25069 struct pt_regs *regs = get_irq_regs();
25070- return regs && user_mode_vm(regs);
25071+ return regs && user_mode(regs);
25072 }
25073
25074 /*
25075diff --git a/arch/x86/kernel/i8259.c b/arch/x86/kernel/i8259.c
25076index e7cc537..67d7372 100644
25077--- a/arch/x86/kernel/i8259.c
25078+++ b/arch/x86/kernel/i8259.c
25079@@ -110,7 +110,7 @@ static int i8259A_irq_pending(unsigned int irq)
25080 static void make_8259A_irq(unsigned int irq)
25081 {
25082 disable_irq_nosync(irq);
25083- io_apic_irqs &= ~(1<<irq);
25084+ io_apic_irqs &= ~(1UL<<irq);
25085 irq_set_chip_and_handler(irq, &i8259A_chip, handle_level_irq);
25086 enable_irq(irq);
25087 }
25088@@ -208,7 +208,7 @@ spurious_8259A_irq:
25089 "spurious 8259A interrupt: IRQ%d.\n", irq);
25090 spurious_irq_mask |= irqmask;
25091 }
25092- atomic_inc(&irq_err_count);
25093+ atomic_inc_unchecked(&irq_err_count);
25094 /*
25095 * Theoretically we do not have to handle this IRQ,
25096 * but in Linux this does not cause problems and is
25097@@ -349,14 +349,16 @@ static void init_8259A(int auto_eoi)
25098 /* (slave's support for AEOI in flat mode is to be investigated) */
25099 outb_pic(SLAVE_ICW4_DEFAULT, PIC_SLAVE_IMR);
25100
25101+ pax_open_kernel();
25102 if (auto_eoi)
25103 /*
25104 * In AEOI mode we just have to mask the interrupt
25105 * when acking.
25106 */
25107- i8259A_chip.irq_mask_ack = disable_8259A_irq;
25108+ *(void **)&i8259A_chip.irq_mask_ack = disable_8259A_irq;
25109 else
25110- i8259A_chip.irq_mask_ack = mask_and_ack_8259A;
25111+ *(void **)&i8259A_chip.irq_mask_ack = mask_and_ack_8259A;
25112+ pax_close_kernel();
25113
25114 udelay(100); /* wait for 8259A to initialize */
25115
25116diff --git a/arch/x86/kernel/io_delay.c b/arch/x86/kernel/io_delay.c
25117index a979b5b..1d6db75 100644
25118--- a/arch/x86/kernel/io_delay.c
25119+++ b/arch/x86/kernel/io_delay.c
25120@@ -58,7 +58,7 @@ static int __init dmi_io_delay_0xed_port(const struct dmi_system_id *id)
25121 * Quirk table for systems that misbehave (lock up, etc.) if port
25122 * 0x80 is used:
25123 */
25124-static struct dmi_system_id __initdata io_delay_0xed_port_dmi_table[] = {
25125+static const struct dmi_system_id __initconst io_delay_0xed_port_dmi_table[] = {
25126 {
25127 .callback = dmi_io_delay_0xed_port,
25128 .ident = "Compaq Presario V6000",
25129diff --git a/arch/x86/kernel/ioport.c b/arch/x86/kernel/ioport.c
25130index 4ddaf66..49d5c18 100644
25131--- a/arch/x86/kernel/ioport.c
25132+++ b/arch/x86/kernel/ioport.c
25133@@ -6,6 +6,7 @@
25134 #include <linux/sched.h>
25135 #include <linux/kernel.h>
25136 #include <linux/capability.h>
25137+#include <linux/security.h>
25138 #include <linux/errno.h>
25139 #include <linux/types.h>
25140 #include <linux/ioport.h>
25141@@ -30,6 +31,12 @@ asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
25142 return -EINVAL;
25143 if (turn_on && !capable(CAP_SYS_RAWIO))
25144 return -EPERM;
25145+#ifdef CONFIG_GRKERNSEC_IO
25146+ if (turn_on && grsec_disable_privio) {
25147+ gr_handle_ioperm();
25148+ return -ENODEV;
25149+ }
25150+#endif
25151
25152 /*
25153 * If it's the first ioperm() call in this thread's lifetime, set the
25154@@ -54,7 +61,7 @@ asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
25155 * because the ->io_bitmap_max value must match the bitmap
25156 * contents:
25157 */
25158- tss = &per_cpu(init_tss, get_cpu());
25159+ tss = init_tss + get_cpu();
25160
25161 if (turn_on)
25162 bitmap_clear(t->io_bitmap_ptr, from, num);
25163@@ -105,6 +112,12 @@ SYSCALL_DEFINE1(iopl, unsigned int, level)
25164 if (level > old) {
25165 if (!capable(CAP_SYS_RAWIO))
25166 return -EPERM;
25167+#ifdef CONFIG_GRKERNSEC_IO
25168+ if (grsec_disable_privio) {
25169+ gr_handle_iopl();
25170+ return -ENODEV;
25171+ }
25172+#endif
25173 }
25174 regs->flags = (regs->flags & ~X86_EFLAGS_IOPL) | (level << 12);
25175 t->iopl = level << 12;
25176diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c
25177index 67b1cbe..6ad4cbc 100644
25178--- a/arch/x86/kernel/irq.c
25179+++ b/arch/x86/kernel/irq.c
25180@@ -22,7 +22,7 @@
25181 #define CREATE_TRACE_POINTS
25182 #include <asm/trace/irq_vectors.h>
25183
25184-atomic_t irq_err_count;
25185+atomic_unchecked_t irq_err_count;
25186
25187 /* Function pointer for generic interrupt vector handling */
25188 void (*x86_platform_ipi_callback)(void) = NULL;
25189@@ -132,9 +132,9 @@ int arch_show_interrupts(struct seq_file *p, int prec)
25190 seq_printf(p, "%10u ", irq_stats(j)->irq_hv_callback_count);
25191 seq_puts(p, " Hypervisor callback interrupts\n");
25192 #endif
25193- seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count));
25194+ seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read_unchecked(&irq_err_count));
25195 #if defined(CONFIG_X86_IO_APIC)
25196- seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read(&irq_mis_count));
25197+ seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read_unchecked(&irq_mis_count));
25198 #endif
25199 return 0;
25200 }
25201@@ -174,7 +174,7 @@ u64 arch_irq_stat_cpu(unsigned int cpu)
25202
25203 u64 arch_irq_stat(void)
25204 {
25205- u64 sum = atomic_read(&irq_err_count);
25206+ u64 sum = atomic_read_unchecked(&irq_err_count);
25207 return sum;
25208 }
25209
25210diff --git a/arch/x86/kernel/irq_32.c b/arch/x86/kernel/irq_32.c
25211index 28d28f5..e6cc9ae 100644
25212--- a/arch/x86/kernel/irq_32.c
25213+++ b/arch/x86/kernel/irq_32.c
25214@@ -29,6 +29,8 @@ EXPORT_PER_CPU_SYMBOL(irq_regs);
25215
25216 #ifdef CONFIG_DEBUG_STACKOVERFLOW
25217
25218+extern void gr_handle_kernel_exploit(void);
25219+
25220 int sysctl_panic_on_stackoverflow __read_mostly;
25221
25222 /* Debugging check for stack overflow: is there less than 1KB free? */
25223@@ -39,13 +41,14 @@ static int check_stack_overflow(void)
25224 __asm__ __volatile__("andl %%esp,%0" :
25225 "=r" (sp) : "0" (THREAD_SIZE - 1));
25226
25227- return sp < (sizeof(struct thread_info) + STACK_WARN);
25228+ return sp < STACK_WARN;
25229 }
25230
25231 static void print_stack_overflow(void)
25232 {
25233 printk(KERN_WARNING "low stack detected by irq handler\n");
25234 dump_stack();
25235+ gr_handle_kernel_exploit();
25236 if (sysctl_panic_on_stackoverflow)
25237 panic("low stack detected by irq handler - check messages\n");
25238 }
25239@@ -77,10 +80,9 @@ static inline void *current_stack(void)
25240 static inline int
25241 execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
25242 {
25243- struct irq_stack *curstk, *irqstk;
25244+ struct irq_stack *irqstk;
25245 u32 *isp, *prev_esp, arg1, arg2;
25246
25247- curstk = (struct irq_stack *) current_stack();
25248 irqstk = __this_cpu_read(hardirq_stack);
25249
25250 /*
25251@@ -89,15 +91,19 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
25252 * handler) we can't do that and just have to keep using the
25253 * current stack (which is the irq stack already after all)
25254 */
25255- if (unlikely(curstk == irqstk))
25256+ if (unlikely((void *)current_stack_pointer - (void *)irqstk < THREAD_SIZE))
25257 return 0;
25258
25259- isp = (u32 *) ((char *)irqstk + sizeof(*irqstk));
25260+ isp = (u32 *) ((char *)irqstk + sizeof(*irqstk) - 8);
25261
25262 /* Save the next esp at the bottom of the stack */
25263 prev_esp = (u32 *)irqstk;
25264 *prev_esp = current_stack_pointer();
25265
25266+#ifdef CONFIG_PAX_MEMORY_UDEREF
25267+ __set_fs(MAKE_MM_SEG(0));
25268+#endif
25269+
25270 if (unlikely(overflow))
25271 call_on_stack(print_stack_overflow, isp);
25272
25273@@ -108,6 +114,11 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
25274 : "0" (irq), "1" (desc), "2" (isp),
25275 "D" (desc->handle_irq)
25276 : "memory", "cc", "ecx");
25277+
25278+#ifdef CONFIG_PAX_MEMORY_UDEREF
25279+ __set_fs(current_thread_info()->addr_limit);
25280+#endif
25281+
25282 return 1;
25283 }
25284
25285@@ -116,32 +127,18 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
25286 */
25287 void irq_ctx_init(int cpu)
25288 {
25289- struct irq_stack *irqstk;
25290-
25291 if (per_cpu(hardirq_stack, cpu))
25292 return;
25293
25294- irqstk = page_address(alloc_pages_node(cpu_to_node(cpu),
25295- THREADINFO_GFP,
25296- THREAD_SIZE_ORDER));
25297- per_cpu(hardirq_stack, cpu) = irqstk;
25298-
25299- irqstk = page_address(alloc_pages_node(cpu_to_node(cpu),
25300- THREADINFO_GFP,
25301- THREAD_SIZE_ORDER));
25302- per_cpu(softirq_stack, cpu) = irqstk;
25303-
25304- printk(KERN_DEBUG "CPU %u irqstacks, hard=%p soft=%p\n",
25305- cpu, per_cpu(hardirq_stack, cpu), per_cpu(softirq_stack, cpu));
25306+ per_cpu(hardirq_stack, cpu) = page_address(alloc_pages_node(cpu_to_node(cpu), THREADINFO_GFP, THREAD_SIZE_ORDER));
25307+ per_cpu(softirq_stack, cpu) = page_address(alloc_pages_node(cpu_to_node(cpu), THREADINFO_GFP, THREAD_SIZE_ORDER));
25308 }
25309
25310 void do_softirq_own_stack(void)
25311 {
25312- struct thread_info *curstk;
25313 struct irq_stack *irqstk;
25314 u32 *isp, *prev_esp;
25315
25316- curstk = current_stack();
25317 irqstk = __this_cpu_read(softirq_stack);
25318
25319 /* build the stack frame on the softirq stack */
25320@@ -151,7 +148,16 @@ void do_softirq_own_stack(void)
25321 prev_esp = (u32 *)irqstk;
25322 *prev_esp = current_stack_pointer();
25323
25324+#ifdef CONFIG_PAX_MEMORY_UDEREF
25325+ __set_fs(MAKE_MM_SEG(0));
25326+#endif
25327+
25328 call_on_stack(__do_softirq, isp);
25329+
25330+#ifdef CONFIG_PAX_MEMORY_UDEREF
25331+ __set_fs(current_thread_info()->addr_limit);
25332+#endif
25333+
25334 }
25335
25336 bool handle_irq(unsigned irq, struct pt_regs *regs)
25337@@ -165,7 +171,7 @@ bool handle_irq(unsigned irq, struct pt_regs *regs)
25338 if (unlikely(!desc))
25339 return false;
25340
25341- if (user_mode_vm(regs) || !execute_on_irq_stack(overflow, desc, irq)) {
25342+ if (user_mode(regs) || !execute_on_irq_stack(overflow, desc, irq)) {
25343 if (unlikely(overflow))
25344 print_stack_overflow();
25345 desc->handle_irq(irq, desc);
25346diff --git a/arch/x86/kernel/irq_64.c b/arch/x86/kernel/irq_64.c
25347index e4b503d..824fce8 100644
25348--- a/arch/x86/kernel/irq_64.c
25349+++ b/arch/x86/kernel/irq_64.c
25350@@ -26,6 +26,8 @@ EXPORT_PER_CPU_SYMBOL(irq_stat);
25351 DEFINE_PER_CPU(struct pt_regs *, irq_regs);
25352 EXPORT_PER_CPU_SYMBOL(irq_regs);
25353
25354+extern void gr_handle_kernel_exploit(void);
25355+
25356 int sysctl_panic_on_stackoverflow;
25357
25358 /*
25359@@ -44,7 +46,7 @@ static inline void stack_overflow_check(struct pt_regs *regs)
25360 u64 estack_top, estack_bottom;
25361 u64 curbase = (u64)task_stack_page(current);
25362
25363- if (user_mode_vm(regs))
25364+ if (user_mode(regs))
25365 return;
25366
25367 if (regs->sp >= curbase + sizeof(struct thread_info) +
25368@@ -69,6 +71,8 @@ static inline void stack_overflow_check(struct pt_regs *regs)
25369 irq_stack_top, irq_stack_bottom,
25370 estack_top, estack_bottom);
25371
25372+ gr_handle_kernel_exploit();
25373+
25374 if (sysctl_panic_on_stackoverflow)
25375 panic("low stack detected by irq handler - check messages\n");
25376 #endif
25377diff --git a/arch/x86/kernel/jump_label.c b/arch/x86/kernel/jump_label.c
25378index 26d5a55..a01160a 100644
25379--- a/arch/x86/kernel/jump_label.c
25380+++ b/arch/x86/kernel/jump_label.c
25381@@ -51,7 +51,7 @@ static void __jump_label_transform(struct jump_entry *entry,
25382 * Jump label is enabled for the first time.
25383 * So we expect a default_nop...
25384 */
25385- if (unlikely(memcmp((void *)entry->code, default_nop, 5)
25386+ if (unlikely(memcmp((void *)ktla_ktva(entry->code), default_nop, 5)
25387 != 0))
25388 bug_at((void *)entry->code, __LINE__);
25389 } else {
25390@@ -59,7 +59,7 @@ static void __jump_label_transform(struct jump_entry *entry,
25391 * ...otherwise expect an ideal_nop. Otherwise
25392 * something went horribly wrong.
25393 */
25394- if (unlikely(memcmp((void *)entry->code, ideal_nop, 5)
25395+ if (unlikely(memcmp((void *)ktla_ktva(entry->code), ideal_nop, 5)
25396 != 0))
25397 bug_at((void *)entry->code, __LINE__);
25398 }
25399@@ -75,13 +75,13 @@ static void __jump_label_transform(struct jump_entry *entry,
25400 * are converting the default nop to the ideal nop.
25401 */
25402 if (init) {
25403- if (unlikely(memcmp((void *)entry->code, default_nop, 5) != 0))
25404+ if (unlikely(memcmp((void *)ktla_ktva(entry->code), default_nop, 5) != 0))
25405 bug_at((void *)entry->code, __LINE__);
25406 } else {
25407 code.jump = 0xe9;
25408 code.offset = entry->target -
25409 (entry->code + JUMP_LABEL_NOP_SIZE);
25410- if (unlikely(memcmp((void *)entry->code, &code, 5) != 0))
25411+ if (unlikely(memcmp((void *)ktla_ktva(entry->code), &code, 5) != 0))
25412 bug_at((void *)entry->code, __LINE__);
25413 }
25414 memcpy(&code, ideal_nops[NOP_ATOMIC5], JUMP_LABEL_NOP_SIZE);
25415diff --git a/arch/x86/kernel/kgdb.c b/arch/x86/kernel/kgdb.c
25416index 25ecd56..e12482f 100644
25417--- a/arch/x86/kernel/kgdb.c
25418+++ b/arch/x86/kernel/kgdb.c
25419@@ -126,11 +126,11 @@ char *dbg_get_reg(int regno, void *mem, struct pt_regs *regs)
25420 #ifdef CONFIG_X86_32
25421 switch (regno) {
25422 case GDB_SS:
25423- if (!user_mode_vm(regs))
25424+ if (!user_mode(regs))
25425 *(unsigned long *)mem = __KERNEL_DS;
25426 break;
25427 case GDB_SP:
25428- if (!user_mode_vm(regs))
25429+ if (!user_mode(regs))
25430 *(unsigned long *)mem = kernel_stack_pointer(regs);
25431 break;
25432 case GDB_GS:
25433@@ -228,7 +228,10 @@ static void kgdb_correct_hw_break(void)
25434 bp->attr.bp_addr = breakinfo[breakno].addr;
25435 bp->attr.bp_len = breakinfo[breakno].len;
25436 bp->attr.bp_type = breakinfo[breakno].type;
25437- info->address = breakinfo[breakno].addr;
25438+ if (breakinfo[breakno].type == X86_BREAKPOINT_EXECUTE)
25439+ info->address = ktla_ktva(breakinfo[breakno].addr);
25440+ else
25441+ info->address = breakinfo[breakno].addr;
25442 info->len = breakinfo[breakno].len;
25443 info->type = breakinfo[breakno].type;
25444 val = arch_install_hw_breakpoint(bp);
25445@@ -475,12 +478,12 @@ int kgdb_arch_handle_exception(int e_vector, int signo, int err_code,
25446 case 'k':
25447 /* clear the trace bit */
25448 linux_regs->flags &= ~X86_EFLAGS_TF;
25449- atomic_set(&kgdb_cpu_doing_single_step, -1);
25450+ atomic_set_unchecked(&kgdb_cpu_doing_single_step, -1);
25451
25452 /* set the trace bit if we're stepping */
25453 if (remcomInBuffer[0] == 's') {
25454 linux_regs->flags |= X86_EFLAGS_TF;
25455- atomic_set(&kgdb_cpu_doing_single_step,
25456+ atomic_set_unchecked(&kgdb_cpu_doing_single_step,
25457 raw_smp_processor_id());
25458 }
25459
25460@@ -545,7 +548,7 @@ static int __kgdb_notify(struct die_args *args, unsigned long cmd)
25461
25462 switch (cmd) {
25463 case DIE_DEBUG:
25464- if (atomic_read(&kgdb_cpu_doing_single_step) != -1) {
25465+ if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1) {
25466 if (user_mode(regs))
25467 return single_step_cont(regs, args);
25468 break;
25469@@ -750,11 +753,11 @@ int kgdb_arch_set_breakpoint(struct kgdb_bkpt *bpt)
25470 #endif /* CONFIG_DEBUG_RODATA */
25471
25472 bpt->type = BP_BREAKPOINT;
25473- err = probe_kernel_read(bpt->saved_instr, (char *)bpt->bpt_addr,
25474+ err = probe_kernel_read(bpt->saved_instr, ktla_ktva((char *)bpt->bpt_addr),
25475 BREAK_INSTR_SIZE);
25476 if (err)
25477 return err;
25478- err = probe_kernel_write((char *)bpt->bpt_addr,
25479+ err = probe_kernel_write(ktla_ktva((char *)bpt->bpt_addr),
25480 arch_kgdb_ops.gdb_bpt_instr, BREAK_INSTR_SIZE);
25481 #ifdef CONFIG_DEBUG_RODATA
25482 if (!err)
25483@@ -767,7 +770,7 @@ int kgdb_arch_set_breakpoint(struct kgdb_bkpt *bpt)
25484 return -EBUSY;
25485 text_poke((void *)bpt->bpt_addr, arch_kgdb_ops.gdb_bpt_instr,
25486 BREAK_INSTR_SIZE);
25487- err = probe_kernel_read(opc, (char *)bpt->bpt_addr, BREAK_INSTR_SIZE);
25488+ err = probe_kernel_read(opc, ktla_ktva((char *)bpt->bpt_addr), BREAK_INSTR_SIZE);
25489 if (err)
25490 return err;
25491 if (memcmp(opc, arch_kgdb_ops.gdb_bpt_instr, BREAK_INSTR_SIZE))
25492@@ -792,13 +795,13 @@ int kgdb_arch_remove_breakpoint(struct kgdb_bkpt *bpt)
25493 if (mutex_is_locked(&text_mutex))
25494 goto knl_write;
25495 text_poke((void *)bpt->bpt_addr, bpt->saved_instr, BREAK_INSTR_SIZE);
25496- err = probe_kernel_read(opc, (char *)bpt->bpt_addr, BREAK_INSTR_SIZE);
25497+ err = probe_kernel_read(opc, ktla_ktva((char *)bpt->bpt_addr), BREAK_INSTR_SIZE);
25498 if (err || memcmp(opc, bpt->saved_instr, BREAK_INSTR_SIZE))
25499 goto knl_write;
25500 return err;
25501 knl_write:
25502 #endif /* CONFIG_DEBUG_RODATA */
25503- return probe_kernel_write((char *)bpt->bpt_addr,
25504+ return probe_kernel_write(ktla_ktva((char *)bpt->bpt_addr),
25505 (char *)bpt->saved_instr, BREAK_INSTR_SIZE);
25506 }
25507
25508diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c
25509index 4e3d5a9..03fffd8 100644
25510--- a/arch/x86/kernel/kprobes/core.c
25511+++ b/arch/x86/kernel/kprobes/core.c
25512@@ -120,9 +120,12 @@ __synthesize_relative_insn(void *from, void *to, u8 op)
25513 s32 raddr;
25514 } __packed *insn;
25515
25516- insn = (struct __arch_relative_insn *)from;
25517+ insn = (struct __arch_relative_insn *)ktla_ktva(from);
25518+
25519+ pax_open_kernel();
25520 insn->raddr = (s32)((long)(to) - ((long)(from) + 5));
25521 insn->op = op;
25522+ pax_close_kernel();
25523 }
25524
25525 /* Insert a jump instruction at address 'from', which jumps to address 'to'.*/
25526@@ -168,7 +171,7 @@ int can_boost(kprobe_opcode_t *opcodes)
25527 kprobe_opcode_t opcode;
25528 kprobe_opcode_t *orig_opcodes = opcodes;
25529
25530- if (search_exception_tables((unsigned long)opcodes))
25531+ if (search_exception_tables(ktva_ktla((unsigned long)opcodes)))
25532 return 0; /* Page fault may occur on this address. */
25533
25534 retry:
25535@@ -260,12 +263,12 @@ __recover_probed_insn(kprobe_opcode_t *buf, unsigned long addr)
25536 * Fortunately, we know that the original code is the ideal 5-byte
25537 * long NOP.
25538 */
25539- memcpy(buf, (void *)addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
25540+ memcpy(buf, (void *)ktla_ktva(addr), MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
25541 if (faddr)
25542 memcpy(buf, ideal_nops[NOP_ATOMIC5], 5);
25543 else
25544 buf[0] = kp->opcode;
25545- return (unsigned long)buf;
25546+ return ktva_ktla((unsigned long)buf);
25547 }
25548
25549 /*
25550@@ -364,7 +367,9 @@ int __copy_instruction(u8 *dest, u8 *src)
25551 /* Another subsystem puts a breakpoint, failed to recover */
25552 if (insn.opcode.bytes[0] == BREAKPOINT_INSTRUCTION)
25553 return 0;
25554+ pax_open_kernel();
25555 memcpy(dest, insn.kaddr, insn.length);
25556+ pax_close_kernel();
25557
25558 #ifdef CONFIG_X86_64
25559 if (insn_rip_relative(&insn)) {
25560@@ -391,7 +396,9 @@ int __copy_instruction(u8 *dest, u8 *src)
25561 return 0;
25562 }
25563 disp = (u8 *) dest + insn_offset_displacement(&insn);
25564+ pax_open_kernel();
25565 *(s32 *) disp = (s32) newdisp;
25566+ pax_close_kernel();
25567 }
25568 #endif
25569 return insn.length;
25570@@ -533,7 +540,7 @@ static void setup_singlestep(struct kprobe *p, struct pt_regs *regs,
25571 * nor set current_kprobe, because it doesn't use single
25572 * stepping.
25573 */
25574- regs->ip = (unsigned long)p->ainsn.insn;
25575+ regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
25576 preempt_enable_no_resched();
25577 return;
25578 }
25579@@ -550,9 +557,9 @@ static void setup_singlestep(struct kprobe *p, struct pt_regs *regs,
25580 regs->flags &= ~X86_EFLAGS_IF;
25581 /* single step inline if the instruction is an int3 */
25582 if (p->opcode == BREAKPOINT_INSTRUCTION)
25583- regs->ip = (unsigned long)p->addr;
25584+ regs->ip = ktla_ktva((unsigned long)p->addr);
25585 else
25586- regs->ip = (unsigned long)p->ainsn.insn;
25587+ regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
25588 }
25589 NOKPROBE_SYMBOL(setup_singlestep);
25590
25591@@ -602,7 +609,7 @@ int kprobe_int3_handler(struct pt_regs *regs)
25592 struct kprobe *p;
25593 struct kprobe_ctlblk *kcb;
25594
25595- if (user_mode_vm(regs))
25596+ if (user_mode(regs))
25597 return 0;
25598
25599 addr = (kprobe_opcode_t *)(regs->ip - sizeof(kprobe_opcode_t));
25600@@ -637,7 +644,7 @@ int kprobe_int3_handler(struct pt_regs *regs)
25601 setup_singlestep(p, regs, kcb, 0);
25602 return 1;
25603 }
25604- } else if (*addr != BREAKPOINT_INSTRUCTION) {
25605+ } else if (*(kprobe_opcode_t *)ktla_ktva((unsigned long)addr) != BREAKPOINT_INSTRUCTION) {
25606 /*
25607 * The breakpoint instruction was removed right
25608 * after we hit it. Another cpu has removed
25609@@ -684,6 +691,9 @@ static void __used kretprobe_trampoline_holder(void)
25610 " movq %rax, 152(%rsp)\n"
25611 RESTORE_REGS_STRING
25612 " popfq\n"
25613+#ifdef KERNEXEC_PLUGIN
25614+ " btsq $63,(%rsp)\n"
25615+#endif
25616 #else
25617 " pushf\n"
25618 SAVE_REGS_STRING
25619@@ -824,7 +834,7 @@ static void resume_execution(struct kprobe *p, struct pt_regs *regs,
25620 struct kprobe_ctlblk *kcb)
25621 {
25622 unsigned long *tos = stack_addr(regs);
25623- unsigned long copy_ip = (unsigned long)p->ainsn.insn;
25624+ unsigned long copy_ip = ktva_ktla((unsigned long)p->ainsn.insn);
25625 unsigned long orig_ip = (unsigned long)p->addr;
25626 kprobe_opcode_t *insn = p->ainsn.insn;
25627
25628@@ -1007,7 +1017,7 @@ int kprobe_exceptions_notify(struct notifier_block *self, unsigned long val,
25629 struct die_args *args = data;
25630 int ret = NOTIFY_DONE;
25631
25632- if (args->regs && user_mode_vm(args->regs))
25633+ if (args->regs && user_mode(args->regs))
25634 return ret;
25635
25636 if (val == DIE_GPF) {
25637diff --git a/arch/x86/kernel/kprobes/opt.c b/arch/x86/kernel/kprobes/opt.c
25638index 7b3b9d1..e2478b91 100644
25639--- a/arch/x86/kernel/kprobes/opt.c
25640+++ b/arch/x86/kernel/kprobes/opt.c
25641@@ -79,6 +79,7 @@ found:
25642 /* Insert a move instruction which sets a pointer to eax/rdi (1st arg). */
25643 static void synthesize_set_arg1(kprobe_opcode_t *addr, unsigned long val)
25644 {
25645+ pax_open_kernel();
25646 #ifdef CONFIG_X86_64
25647 *addr++ = 0x48;
25648 *addr++ = 0xbf;
25649@@ -86,6 +87,7 @@ static void synthesize_set_arg1(kprobe_opcode_t *addr, unsigned long val)
25650 *addr++ = 0xb8;
25651 #endif
25652 *(unsigned long *)addr = val;
25653+ pax_close_kernel();
25654 }
25655
25656 asm (
25657@@ -342,7 +344,7 @@ int arch_prepare_optimized_kprobe(struct optimized_kprobe *op,
25658 * Verify if the address gap is in 2GB range, because this uses
25659 * a relative jump.
25660 */
25661- rel = (long)op->optinsn.insn - (long)op->kp.addr + RELATIVEJUMP_SIZE;
25662+ rel = (long)op->optinsn.insn - ktla_ktva((long)op->kp.addr) + RELATIVEJUMP_SIZE;
25663 if (abs(rel) > 0x7fffffff) {
25664 __arch_remove_optimized_kprobe(op, 0);
25665 return -ERANGE;
25666@@ -359,16 +361,18 @@ int arch_prepare_optimized_kprobe(struct optimized_kprobe *op,
25667 op->optinsn.size = ret;
25668
25669 /* Copy arch-dep-instance from template */
25670- memcpy(buf, &optprobe_template_entry, TMPL_END_IDX);
25671+ pax_open_kernel();
25672+ memcpy(buf, ktla_ktva(&optprobe_template_entry), TMPL_END_IDX);
25673+ pax_close_kernel();
25674
25675 /* Set probe information */
25676 synthesize_set_arg1(buf + TMPL_MOVE_IDX, (unsigned long)op);
25677
25678 /* Set probe function call */
25679- synthesize_relcall(buf + TMPL_CALL_IDX, optimized_callback);
25680+ synthesize_relcall(ktva_ktla(buf) + TMPL_CALL_IDX, optimized_callback);
25681
25682 /* Set returning jmp instruction at the tail of out-of-line buffer */
25683- synthesize_reljump(buf + TMPL_END_IDX + op->optinsn.size,
25684+ synthesize_reljump(ktva_ktla(buf) + TMPL_END_IDX + op->optinsn.size,
25685 (u8 *)op->kp.addr + op->optinsn.size);
25686
25687 flush_icache_range((unsigned long) buf,
25688@@ -393,7 +397,7 @@ void arch_optimize_kprobes(struct list_head *oplist)
25689 WARN_ON(kprobe_disabled(&op->kp));
25690
25691 /* Backup instructions which will be replaced by jump address */
25692- memcpy(op->optinsn.copied_insn, op->kp.addr + INT3_SIZE,
25693+ memcpy(op->optinsn.copied_insn, ktla_ktva(op->kp.addr) + INT3_SIZE,
25694 RELATIVE_ADDR_SIZE);
25695
25696 insn_buf[0] = RELATIVEJUMP_OPCODE;
25697@@ -441,7 +445,7 @@ int setup_detour_execution(struct kprobe *p, struct pt_regs *regs, int reenter)
25698 /* This kprobe is really able to run optimized path. */
25699 op = container_of(p, struct optimized_kprobe, kp);
25700 /* Detour through copied instructions */
25701- regs->ip = (unsigned long)op->optinsn.insn + TMPL_END_IDX;
25702+ regs->ip = ktva_ktla((unsigned long)op->optinsn.insn) + TMPL_END_IDX;
25703 if (!reenter)
25704 reset_current_kprobe();
25705 preempt_enable_no_resched();
25706diff --git a/arch/x86/kernel/ksysfs.c b/arch/x86/kernel/ksysfs.c
25707index c2bedae..25e7ab60 100644
25708--- a/arch/x86/kernel/ksysfs.c
25709+++ b/arch/x86/kernel/ksysfs.c
25710@@ -184,7 +184,7 @@ out:
25711
25712 static struct kobj_attribute type_attr = __ATTR_RO(type);
25713
25714-static struct bin_attribute data_attr = {
25715+static bin_attribute_no_const data_attr __read_only = {
25716 .attr = {
25717 .name = "data",
25718 .mode = S_IRUGO,
25719diff --git a/arch/x86/kernel/ldt.c b/arch/x86/kernel/ldt.c
25720index c37886d..d851d32 100644
25721--- a/arch/x86/kernel/ldt.c
25722+++ b/arch/x86/kernel/ldt.c
25723@@ -66,13 +66,13 @@ static int alloc_ldt(mm_context_t *pc, int mincount, int reload)
25724 if (reload) {
25725 #ifdef CONFIG_SMP
25726 preempt_disable();
25727- load_LDT(pc);
25728+ load_LDT_nolock(pc);
25729 if (!cpumask_equal(mm_cpumask(current->mm),
25730 cpumask_of(smp_processor_id())))
25731 smp_call_function(flush_ldt, current->mm, 1);
25732 preempt_enable();
25733 #else
25734- load_LDT(pc);
25735+ load_LDT_nolock(pc);
25736 #endif
25737 }
25738 if (oldsize) {
25739@@ -94,7 +94,7 @@ static inline int copy_ldt(mm_context_t *new, mm_context_t *old)
25740 return err;
25741
25742 for (i = 0; i < old->size; i++)
25743- write_ldt_entry(new->ldt, i, old->ldt + i * LDT_ENTRY_SIZE);
25744+ write_ldt_entry(new->ldt, i, old->ldt + i);
25745 return 0;
25746 }
25747
25748@@ -115,6 +115,24 @@ int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
25749 retval = copy_ldt(&mm->context, &old_mm->context);
25750 mutex_unlock(&old_mm->context.lock);
25751 }
25752+
25753+ if (tsk == current) {
25754+ mm->context.vdso = 0;
25755+
25756+#ifdef CONFIG_X86_32
25757+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
25758+ mm->context.user_cs_base = 0UL;
25759+ mm->context.user_cs_limit = ~0UL;
25760+
25761+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
25762+ cpus_clear(mm->context.cpu_user_cs_mask);
25763+#endif
25764+
25765+#endif
25766+#endif
25767+
25768+ }
25769+
25770 return retval;
25771 }
25772
25773@@ -229,6 +247,13 @@ static int write_ldt(void __user *ptr, unsigned long bytecount, int oldmode)
25774 }
25775 }
25776
25777+#ifdef CONFIG_PAX_SEGMEXEC
25778+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (ldt_info.contents & MODIFY_LDT_CONTENTS_CODE)) {
25779+ error = -EINVAL;
25780+ goto out_unlock;
25781+ }
25782+#endif
25783+
25784 if (!IS_ENABLED(CONFIG_X86_16BIT) && !ldt_info.seg_32bit) {
25785 error = -EINVAL;
25786 goto out_unlock;
25787diff --git a/arch/x86/kernel/livepatch.c b/arch/x86/kernel/livepatch.c
25788index ff3c3101d..d7c0cd8 100644
25789--- a/arch/x86/kernel/livepatch.c
25790+++ b/arch/x86/kernel/livepatch.c
25791@@ -41,9 +41,10 @@ int klp_write_module_reloc(struct module *mod, unsigned long type,
25792 int ret, numpages, size = 4;
25793 bool readonly;
25794 unsigned long val;
25795- unsigned long core = (unsigned long)mod->module_core;
25796- unsigned long core_ro_size = mod->core_ro_size;
25797- unsigned long core_size = mod->core_size;
25798+ unsigned long core_rx = (unsigned long)mod->module_core_rx;
25799+ unsigned long core_rw = (unsigned long)mod->module_core_rw;
25800+ unsigned long core_size_rx = mod->core_size_rx;
25801+ unsigned long core_size_rw = mod->core_size_rw;
25802
25803 switch (type) {
25804 case R_X86_64_NONE:
25805@@ -66,11 +67,12 @@ int klp_write_module_reloc(struct module *mod, unsigned long type,
25806 return -EINVAL;
25807 }
25808
25809- if (loc < core || loc >= core + core_size)
25810+ if ((loc < core_rx || loc >= core_rx + core_size_rx) &&
25811+ (loc < core_rw || loc >= core_rw + core_size_rw))
25812 /* loc does not point to any symbol inside the module */
25813 return -EINVAL;
25814
25815- if (loc < core + core_ro_size)
25816+ if (loc < core_rx + core_size_rx)
25817 readonly = true;
25818 else
25819 readonly = false;
25820diff --git a/arch/x86/kernel/machine_kexec_32.c b/arch/x86/kernel/machine_kexec_32.c
25821index 469b23d..5449cfe 100644
25822--- a/arch/x86/kernel/machine_kexec_32.c
25823+++ b/arch/x86/kernel/machine_kexec_32.c
25824@@ -26,7 +26,7 @@
25825 #include <asm/cacheflush.h>
25826 #include <asm/debugreg.h>
25827
25828-static void set_idt(void *newidt, __u16 limit)
25829+static void set_idt(struct desc_struct *newidt, __u16 limit)
25830 {
25831 struct desc_ptr curidt;
25832
25833@@ -38,7 +38,7 @@ static void set_idt(void *newidt, __u16 limit)
25834 }
25835
25836
25837-static void set_gdt(void *newgdt, __u16 limit)
25838+static void set_gdt(struct desc_struct *newgdt, __u16 limit)
25839 {
25840 struct desc_ptr curgdt;
25841
25842@@ -216,7 +216,7 @@ void machine_kexec(struct kimage *image)
25843 }
25844
25845 control_page = page_address(image->control_code_page);
25846- memcpy(control_page, relocate_kernel, KEXEC_CONTROL_CODE_MAX_SIZE);
25847+ memcpy(control_page, (void *)ktla_ktva((unsigned long)relocate_kernel), KEXEC_CONTROL_CODE_MAX_SIZE);
25848
25849 relocate_kernel_ptr = control_page;
25850 page_list[PA_CONTROL_PAGE] = __pa(control_page);
25851diff --git a/arch/x86/kernel/mcount_64.S b/arch/x86/kernel/mcount_64.S
25852index 94ea120..4154cea 100644
25853--- a/arch/x86/kernel/mcount_64.S
25854+++ b/arch/x86/kernel/mcount_64.S
25855@@ -7,7 +7,7 @@
25856 #include <linux/linkage.h>
25857 #include <asm/ptrace.h>
25858 #include <asm/ftrace.h>
25859-
25860+#include <asm/alternative-asm.h>
25861
25862 .code64
25863 .section .entry.text, "ax"
25864@@ -148,8 +148,9 @@
25865 #ifdef CONFIG_DYNAMIC_FTRACE
25866
25867 ENTRY(function_hook)
25868+ pax_force_retaddr
25869 retq
25870-END(function_hook)
25871+ENDPROC(function_hook)
25872
25873 ENTRY(ftrace_caller)
25874 /* save_mcount_regs fills in first two parameters */
25875@@ -181,8 +182,9 @@ GLOBAL(ftrace_graph_call)
25876 #endif
25877
25878 GLOBAL(ftrace_stub)
25879+ pax_force_retaddr
25880 retq
25881-END(ftrace_caller)
25882+ENDPROC(ftrace_caller)
25883
25884 ENTRY(ftrace_regs_caller)
25885 /* Save the current flags before any operations that can change them */
25886@@ -253,7 +255,7 @@ GLOBAL(ftrace_regs_caller_end)
25887
25888 jmp ftrace_return
25889
25890-END(ftrace_regs_caller)
25891+ENDPROC(ftrace_regs_caller)
25892
25893
25894 #else /* ! CONFIG_DYNAMIC_FTRACE */
25895@@ -272,18 +274,20 @@ fgraph_trace:
25896 #endif
25897
25898 GLOBAL(ftrace_stub)
25899+ pax_force_retaddr
25900 retq
25901
25902 trace:
25903 /* save_mcount_regs fills in first two parameters */
25904 save_mcount_regs
25905
25906+ pax_force_fptr ftrace_trace_function
25907 call *ftrace_trace_function
25908
25909 restore_mcount_regs
25910
25911 jmp fgraph_trace
25912-END(function_hook)
25913+ENDPROC(function_hook)
25914 #endif /* CONFIG_DYNAMIC_FTRACE */
25915 #endif /* CONFIG_FUNCTION_TRACER */
25916
25917@@ -305,8 +309,9 @@ ENTRY(ftrace_graph_caller)
25918
25919 restore_mcount_regs
25920
25921+ pax_force_retaddr
25922 retq
25923-END(ftrace_graph_caller)
25924+ENDPROC(ftrace_graph_caller)
25925
25926 GLOBAL(return_to_handler)
25927 subq $24, %rsp
25928@@ -322,5 +327,7 @@ GLOBAL(return_to_handler)
25929 movq 8(%rsp), %rdx
25930 movq (%rsp), %rax
25931 addq $24, %rsp
25932+ pax_force_fptr %rdi
25933 jmp *%rdi
25934+ENDPROC(return_to_handler)
25935 #endif
25936diff --git a/arch/x86/kernel/module.c b/arch/x86/kernel/module.c
25937index d1ac80b..f593701 100644
25938--- a/arch/x86/kernel/module.c
25939+++ b/arch/x86/kernel/module.c
25940@@ -82,17 +82,17 @@ static unsigned long int get_module_load_offset(void)
25941 }
25942 #endif
25943
25944-void *module_alloc(unsigned long size)
25945+static inline void *__module_alloc(unsigned long size, pgprot_t prot)
25946 {
25947 void *p;
25948
25949- if (PAGE_ALIGN(size) > MODULES_LEN)
25950+ if (!size || PAGE_ALIGN(size) > MODULES_LEN)
25951 return NULL;
25952
25953 p = __vmalloc_node_range(size, MODULE_ALIGN,
25954 MODULES_VADDR + get_module_load_offset(),
25955- MODULES_END, GFP_KERNEL | __GFP_HIGHMEM,
25956- PAGE_KERNEL_EXEC, 0, NUMA_NO_NODE,
25957+ MODULES_END, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO,
25958+ prot, 0, NUMA_NO_NODE,
25959 __builtin_return_address(0));
25960 if (p && (kasan_module_alloc(p, size) < 0)) {
25961 vfree(p);
25962@@ -102,6 +102,51 @@ void *module_alloc(unsigned long size)
25963 return p;
25964 }
25965
25966+void *module_alloc(unsigned long size)
25967+{
25968+
25969+#ifdef CONFIG_PAX_KERNEXEC
25970+ return __module_alloc(size, PAGE_KERNEL);
25971+#else
25972+ return __module_alloc(size, PAGE_KERNEL_EXEC);
25973+#endif
25974+
25975+}
25976+
25977+#ifdef CONFIG_PAX_KERNEXEC
25978+#ifdef CONFIG_X86_32
25979+void *module_alloc_exec(unsigned long size)
25980+{
25981+ struct vm_struct *area;
25982+
25983+ if (size == 0)
25984+ return NULL;
25985+
25986+ area = __get_vm_area(size, VM_ALLOC, (unsigned long)&MODULES_EXEC_VADDR, (unsigned long)&MODULES_EXEC_END);
25987+return area ? area->addr : NULL;
25988+}
25989+EXPORT_SYMBOL(module_alloc_exec);
25990+
25991+void module_memfree_exec(void *module_region)
25992+{
25993+ vunmap(module_region);
25994+}
25995+EXPORT_SYMBOL(module_memfree_exec);
25996+#else
25997+void module_memfree_exec(void *module_region)
25998+{
25999+ module_memfree(module_region);
26000+}
26001+EXPORT_SYMBOL(module_memfree_exec);
26002+
26003+void *module_alloc_exec(unsigned long size)
26004+{
26005+ return __module_alloc(size, PAGE_KERNEL_RX);
26006+}
26007+EXPORT_SYMBOL(module_alloc_exec);
26008+#endif
26009+#endif
26010+
26011 #ifdef CONFIG_X86_32
26012 int apply_relocate(Elf32_Shdr *sechdrs,
26013 const char *strtab,
26014@@ -112,14 +157,16 @@ int apply_relocate(Elf32_Shdr *sechdrs,
26015 unsigned int i;
26016 Elf32_Rel *rel = (void *)sechdrs[relsec].sh_addr;
26017 Elf32_Sym *sym;
26018- uint32_t *location;
26019+ uint32_t *plocation, location;
26020
26021 DEBUGP("Applying relocate section %u to %u\n",
26022 relsec, sechdrs[relsec].sh_info);
26023 for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
26024 /* This is where to make the change */
26025- location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
26026- + rel[i].r_offset;
26027+ plocation = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr + rel[i].r_offset;
26028+ location = (uint32_t)plocation;
26029+ if (sechdrs[sechdrs[relsec].sh_info].sh_flags & SHF_EXECINSTR)
26030+ plocation = ktla_ktva((void *)plocation);
26031 /* This is the symbol it is referring to. Note that all
26032 undefined symbols have been resolved. */
26033 sym = (Elf32_Sym *)sechdrs[symindex].sh_addr
26034@@ -128,11 +175,15 @@ int apply_relocate(Elf32_Shdr *sechdrs,
26035 switch (ELF32_R_TYPE(rel[i].r_info)) {
26036 case R_386_32:
26037 /* We add the value into the location given */
26038- *location += sym->st_value;
26039+ pax_open_kernel();
26040+ *plocation += sym->st_value;
26041+ pax_close_kernel();
26042 break;
26043 case R_386_PC32:
26044 /* Add the value, subtract its position */
26045- *location += sym->st_value - (uint32_t)location;
26046+ pax_open_kernel();
26047+ *plocation += sym->st_value - location;
26048+ pax_close_kernel();
26049 break;
26050 default:
26051 pr_err("%s: Unknown relocation: %u\n",
26052@@ -177,21 +228,30 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
26053 case R_X86_64_NONE:
26054 break;
26055 case R_X86_64_64:
26056+ pax_open_kernel();
26057 *(u64 *)loc = val;
26058+ pax_close_kernel();
26059 break;
26060 case R_X86_64_32:
26061+ pax_open_kernel();
26062 *(u32 *)loc = val;
26063+ pax_close_kernel();
26064 if (val != *(u32 *)loc)
26065 goto overflow;
26066 break;
26067 case R_X86_64_32S:
26068+ pax_open_kernel();
26069 *(s32 *)loc = val;
26070+ pax_close_kernel();
26071 if ((s64)val != *(s32 *)loc)
26072 goto overflow;
26073 break;
26074 case R_X86_64_PC32:
26075 val -= (u64)loc;
26076+ pax_open_kernel();
26077 *(u32 *)loc = val;
26078+ pax_close_kernel();
26079+
26080 #if 0
26081 if ((s64)val != *(s32 *)loc)
26082 goto overflow;
26083diff --git a/arch/x86/kernel/msr.c b/arch/x86/kernel/msr.c
26084index 113e707..0a690e1 100644
26085--- a/arch/x86/kernel/msr.c
26086+++ b/arch/x86/kernel/msr.c
26087@@ -39,6 +39,7 @@
26088 #include <linux/notifier.h>
26089 #include <linux/uaccess.h>
26090 #include <linux/gfp.h>
26091+#include <linux/grsecurity.h>
26092
26093 #include <asm/processor.h>
26094 #include <asm/msr.h>
26095@@ -105,6 +106,11 @@ static ssize_t msr_write(struct file *file, const char __user *buf,
26096 int err = 0;
26097 ssize_t bytes = 0;
26098
26099+#ifdef CONFIG_GRKERNSEC_KMEM
26100+ gr_handle_msr_write();
26101+ return -EPERM;
26102+#endif
26103+
26104 if (count % 8)
26105 return -EINVAL; /* Invalid chunk size */
26106
26107@@ -152,6 +158,10 @@ static long msr_ioctl(struct file *file, unsigned int ioc, unsigned long arg)
26108 err = -EBADF;
26109 break;
26110 }
26111+#ifdef CONFIG_GRKERNSEC_KMEM
26112+ gr_handle_msr_write();
26113+ return -EPERM;
26114+#endif
26115 if (copy_from_user(&regs, uregs, sizeof regs)) {
26116 err = -EFAULT;
26117 break;
26118@@ -235,7 +245,7 @@ static int msr_class_cpu_callback(struct notifier_block *nfb,
26119 return notifier_from_errno(err);
26120 }
26121
26122-static struct notifier_block __refdata msr_class_cpu_notifier = {
26123+static struct notifier_block msr_class_cpu_notifier = {
26124 .notifier_call = msr_class_cpu_callback,
26125 };
26126
26127diff --git a/arch/x86/kernel/nmi.c b/arch/x86/kernel/nmi.c
26128index c3e985d..110a36a 100644
26129--- a/arch/x86/kernel/nmi.c
26130+++ b/arch/x86/kernel/nmi.c
26131@@ -98,16 +98,16 @@ fs_initcall(nmi_warning_debugfs);
26132
26133 static void nmi_max_handler(struct irq_work *w)
26134 {
26135- struct nmiaction *a = container_of(w, struct nmiaction, irq_work);
26136+ struct nmiwork *n = container_of(w, struct nmiwork, irq_work);
26137 int remainder_ns, decimal_msecs;
26138- u64 whole_msecs = ACCESS_ONCE(a->max_duration);
26139+ u64 whole_msecs = ACCESS_ONCE(n->max_duration);
26140
26141 remainder_ns = do_div(whole_msecs, (1000 * 1000));
26142 decimal_msecs = remainder_ns / 1000;
26143
26144 printk_ratelimited(KERN_INFO
26145 "INFO: NMI handler (%ps) took too long to run: %lld.%03d msecs\n",
26146- a->handler, whole_msecs, decimal_msecs);
26147+ n->action->handler, whole_msecs, decimal_msecs);
26148 }
26149
26150 static int nmi_handle(unsigned int type, struct pt_regs *regs, bool b2b)
26151@@ -134,11 +134,11 @@ static int nmi_handle(unsigned int type, struct pt_regs *regs, bool b2b)
26152 delta = sched_clock() - delta;
26153 trace_nmi_handler(a->handler, (int)delta, thishandled);
26154
26155- if (delta < nmi_longest_ns || delta < a->max_duration)
26156+ if (delta < nmi_longest_ns || delta < a->work->max_duration)
26157 continue;
26158
26159- a->max_duration = delta;
26160- irq_work_queue(&a->irq_work);
26161+ a->work->max_duration = delta;
26162+ irq_work_queue(&a->work->irq_work);
26163 }
26164
26165 rcu_read_unlock();
26166@@ -148,7 +148,7 @@ static int nmi_handle(unsigned int type, struct pt_regs *regs, bool b2b)
26167 }
26168 NOKPROBE_SYMBOL(nmi_handle);
26169
26170-int __register_nmi_handler(unsigned int type, struct nmiaction *action)
26171+int __register_nmi_handler(unsigned int type, const struct nmiaction *action)
26172 {
26173 struct nmi_desc *desc = nmi_to_desc(type);
26174 unsigned long flags;
26175@@ -156,7 +156,8 @@ int __register_nmi_handler(unsigned int type, struct nmiaction *action)
26176 if (!action->handler)
26177 return -EINVAL;
26178
26179- init_irq_work(&action->irq_work, nmi_max_handler);
26180+ action->work->action = action;
26181+ init_irq_work(&action->work->irq_work, nmi_max_handler);
26182
26183 spin_lock_irqsave(&desc->lock, flags);
26184
26185@@ -174,9 +175,9 @@ int __register_nmi_handler(unsigned int type, struct nmiaction *action)
26186 * event confuses some handlers (kdump uses this flag)
26187 */
26188 if (action->flags & NMI_FLAG_FIRST)
26189- list_add_rcu(&action->list, &desc->head);
26190+ pax_list_add_rcu((struct list_head *)&action->list, &desc->head);
26191 else
26192- list_add_tail_rcu(&action->list, &desc->head);
26193+ pax_list_add_tail_rcu((struct list_head *)&action->list, &desc->head);
26194
26195 spin_unlock_irqrestore(&desc->lock, flags);
26196 return 0;
26197@@ -199,7 +200,7 @@ void unregister_nmi_handler(unsigned int type, const char *name)
26198 if (!strcmp(n->name, name)) {
26199 WARN(in_nmi(),
26200 "Trying to free NMI (%s) from NMI context!\n", n->name);
26201- list_del_rcu(&n->list);
26202+ pax_list_del_rcu((struct list_head *)&n->list);
26203 break;
26204 }
26205 }
26206@@ -528,6 +529,17 @@ static inline void nmi_nesting_postprocess(void)
26207 dotraplinkage notrace void
26208 do_nmi(struct pt_regs *regs, long error_code)
26209 {
26210+
26211+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
26212+ if (!user_mode(regs)) {
26213+ unsigned long cs = regs->cs & 0xFFFF;
26214+ unsigned long ip = ktva_ktla(regs->ip);
26215+
26216+ if ((cs == __KERNEL_CS || cs == __KERNEXEC_KERNEL_CS) && ip <= (unsigned long)_etext)
26217+ regs->ip = ip;
26218+ }
26219+#endif
26220+
26221 nmi_nesting_preprocess(regs);
26222
26223 nmi_enter();
26224diff --git a/arch/x86/kernel/nmi_selftest.c b/arch/x86/kernel/nmi_selftest.c
26225index 6d9582e..f746287 100644
26226--- a/arch/x86/kernel/nmi_selftest.c
26227+++ b/arch/x86/kernel/nmi_selftest.c
26228@@ -43,7 +43,7 @@ static void __init init_nmi_testsuite(void)
26229 {
26230 /* trap all the unknown NMIs we may generate */
26231 register_nmi_handler(NMI_UNKNOWN, nmi_unk_cb, 0, "nmi_selftest_unk",
26232- __initdata);
26233+ __initconst);
26234 }
26235
26236 static void __init cleanup_nmi_testsuite(void)
26237@@ -66,7 +66,7 @@ static void __init test_nmi_ipi(struct cpumask *mask)
26238 unsigned long timeout;
26239
26240 if (register_nmi_handler(NMI_LOCAL, test_nmi_ipi_callback,
26241- NMI_FLAG_FIRST, "nmi_selftest", __initdata)) {
26242+ NMI_FLAG_FIRST, "nmi_selftest", __initconst)) {
26243 nmi_fail = FAILURE;
26244 return;
26245 }
26246diff --git a/arch/x86/kernel/paravirt-spinlocks.c b/arch/x86/kernel/paravirt-spinlocks.c
26247index bbb6c73..24a58ef 100644
26248--- a/arch/x86/kernel/paravirt-spinlocks.c
26249+++ b/arch/x86/kernel/paravirt-spinlocks.c
26250@@ -8,7 +8,7 @@
26251
26252 #include <asm/paravirt.h>
26253
26254-struct pv_lock_ops pv_lock_ops = {
26255+struct pv_lock_ops pv_lock_ops __read_only = {
26256 #ifdef CONFIG_SMP
26257 .lock_spinning = __PV_IS_CALLEE_SAVE(paravirt_nop),
26258 .unlock_kick = paravirt_nop,
26259diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
26260index 548d25f..f8fb99c 100644
26261--- a/arch/x86/kernel/paravirt.c
26262+++ b/arch/x86/kernel/paravirt.c
26263@@ -56,6 +56,9 @@ u64 _paravirt_ident_64(u64 x)
26264 {
26265 return x;
26266 }
26267+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
26268+PV_CALLEE_SAVE_REGS_THUNK(_paravirt_ident_64);
26269+#endif
26270
26271 void __init default_banner(void)
26272 {
26273@@ -142,16 +145,20 @@ unsigned paravirt_patch_default(u8 type, u16 clobbers, void *insnbuf,
26274
26275 if (opfunc == NULL)
26276 /* If there's no function, patch it with a ud2a (BUG) */
26277- ret = paravirt_patch_insns(insnbuf, len, ud2a, ud2a+sizeof(ud2a));
26278- else if (opfunc == _paravirt_nop)
26279+ ret = paravirt_patch_insns(insnbuf, len, ktva_ktla(ud2a), ud2a+sizeof(ud2a));
26280+ else if (opfunc == (void *)_paravirt_nop)
26281 /* If the operation is a nop, then nop the callsite */
26282 ret = paravirt_patch_nop();
26283
26284 /* identity functions just return their single argument */
26285- else if (opfunc == _paravirt_ident_32)
26286+ else if (opfunc == (void *)_paravirt_ident_32)
26287 ret = paravirt_patch_ident_32(insnbuf, len);
26288- else if (opfunc == _paravirt_ident_64)
26289+ else if (opfunc == (void *)_paravirt_ident_64)
26290 ret = paravirt_patch_ident_64(insnbuf, len);
26291+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
26292+ else if (opfunc == (void *)__raw_callee_save__paravirt_ident_64)
26293+ ret = paravirt_patch_ident_64(insnbuf, len);
26294+#endif
26295
26296 else if (type == PARAVIRT_PATCH(pv_cpu_ops.iret) ||
26297 type == PARAVIRT_PATCH(pv_cpu_ops.irq_enable_sysexit) ||
26298@@ -176,7 +183,7 @@ unsigned paravirt_patch_insns(void *insnbuf, unsigned len,
26299 if (insn_len > len || start == NULL)
26300 insn_len = len;
26301 else
26302- memcpy(insnbuf, start, insn_len);
26303+ memcpy(insnbuf, ktla_ktva(start), insn_len);
26304
26305 return insn_len;
26306 }
26307@@ -300,7 +307,7 @@ enum paravirt_lazy_mode paravirt_get_lazy_mode(void)
26308 return this_cpu_read(paravirt_lazy_mode);
26309 }
26310
26311-struct pv_info pv_info = {
26312+struct pv_info pv_info __read_only = {
26313 .name = "bare hardware",
26314 .paravirt_enabled = 0,
26315 .kernel_rpl = 0,
26316@@ -311,16 +318,16 @@ struct pv_info pv_info = {
26317 #endif
26318 };
26319
26320-struct pv_init_ops pv_init_ops = {
26321+struct pv_init_ops pv_init_ops __read_only = {
26322 .patch = native_patch,
26323 };
26324
26325-struct pv_time_ops pv_time_ops = {
26326+struct pv_time_ops pv_time_ops __read_only = {
26327 .sched_clock = native_sched_clock,
26328 .steal_clock = native_steal_clock,
26329 };
26330
26331-__visible struct pv_irq_ops pv_irq_ops = {
26332+__visible struct pv_irq_ops pv_irq_ops __read_only = {
26333 .save_fl = __PV_IS_CALLEE_SAVE(native_save_fl),
26334 .restore_fl = __PV_IS_CALLEE_SAVE(native_restore_fl),
26335 .irq_disable = __PV_IS_CALLEE_SAVE(native_irq_disable),
26336@@ -332,7 +339,7 @@ __visible struct pv_irq_ops pv_irq_ops = {
26337 #endif
26338 };
26339
26340-__visible struct pv_cpu_ops pv_cpu_ops = {
26341+__visible struct pv_cpu_ops pv_cpu_ops __read_only = {
26342 .cpuid = native_cpuid,
26343 .get_debugreg = native_get_debugreg,
26344 .set_debugreg = native_set_debugreg,
26345@@ -395,21 +402,26 @@ NOKPROBE_SYMBOL(native_get_debugreg);
26346 NOKPROBE_SYMBOL(native_set_debugreg);
26347 NOKPROBE_SYMBOL(native_load_idt);
26348
26349-struct pv_apic_ops pv_apic_ops = {
26350+struct pv_apic_ops pv_apic_ops __read_only= {
26351 #ifdef CONFIG_X86_LOCAL_APIC
26352 .startup_ipi_hook = paravirt_nop,
26353 #endif
26354 };
26355
26356-#if defined(CONFIG_X86_32) && !defined(CONFIG_X86_PAE)
26357+#ifdef CONFIG_X86_32
26358+#ifdef CONFIG_X86_PAE
26359+/* 64-bit pagetable entries */
26360+#define PTE_IDENT PV_CALLEE_SAVE(_paravirt_ident_64)
26361+#else
26362 /* 32-bit pagetable entries */
26363 #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_32)
26364+#endif
26365 #else
26366 /* 64-bit pagetable entries */
26367 #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_64)
26368 #endif
26369
26370-struct pv_mmu_ops pv_mmu_ops = {
26371+struct pv_mmu_ops pv_mmu_ops __read_only = {
26372
26373 .read_cr2 = native_read_cr2,
26374 .write_cr2 = native_write_cr2,
26375@@ -459,6 +471,7 @@ struct pv_mmu_ops pv_mmu_ops = {
26376 .make_pud = PTE_IDENT,
26377
26378 .set_pgd = native_set_pgd,
26379+ .set_pgd_batched = native_set_pgd_batched,
26380 #endif
26381 #endif /* PAGETABLE_LEVELS >= 3 */
26382
26383@@ -479,6 +492,12 @@ struct pv_mmu_ops pv_mmu_ops = {
26384 },
26385
26386 .set_fixmap = native_set_fixmap,
26387+
26388+#ifdef CONFIG_PAX_KERNEXEC
26389+ .pax_open_kernel = native_pax_open_kernel,
26390+ .pax_close_kernel = native_pax_close_kernel,
26391+#endif
26392+
26393 };
26394
26395 EXPORT_SYMBOL_GPL(pv_time_ops);
26396diff --git a/arch/x86/kernel/paravirt_patch_64.c b/arch/x86/kernel/paravirt_patch_64.c
26397index a1da673..b6f5831 100644
26398--- a/arch/x86/kernel/paravirt_patch_64.c
26399+++ b/arch/x86/kernel/paravirt_patch_64.c
26400@@ -9,7 +9,11 @@ DEF_NATIVE(pv_irq_ops, save_fl, "pushfq; popq %rax");
26401 DEF_NATIVE(pv_mmu_ops, read_cr2, "movq %cr2, %rax");
26402 DEF_NATIVE(pv_mmu_ops, read_cr3, "movq %cr3, %rax");
26403 DEF_NATIVE(pv_mmu_ops, write_cr3, "movq %rdi, %cr3");
26404+
26405+#ifndef CONFIG_PAX_MEMORY_UDEREF
26406 DEF_NATIVE(pv_mmu_ops, flush_tlb_single, "invlpg (%rdi)");
26407+#endif
26408+
26409 DEF_NATIVE(pv_cpu_ops, clts, "clts");
26410 DEF_NATIVE(pv_cpu_ops, wbinvd, "wbinvd");
26411
26412@@ -57,7 +61,11 @@ unsigned native_patch(u8 type, u16 clobbers, void *ibuf,
26413 PATCH_SITE(pv_mmu_ops, read_cr3);
26414 PATCH_SITE(pv_mmu_ops, write_cr3);
26415 PATCH_SITE(pv_cpu_ops, clts);
26416+
26417+#ifndef CONFIG_PAX_MEMORY_UDEREF
26418 PATCH_SITE(pv_mmu_ops, flush_tlb_single);
26419+#endif
26420+
26421 PATCH_SITE(pv_cpu_ops, wbinvd);
26422
26423 patch_site:
26424diff --git a/arch/x86/kernel/pci-calgary_64.c b/arch/x86/kernel/pci-calgary_64.c
26425index 0497f71..7186c0d 100644
26426--- a/arch/x86/kernel/pci-calgary_64.c
26427+++ b/arch/x86/kernel/pci-calgary_64.c
26428@@ -1347,7 +1347,7 @@ static void __init get_tce_space_from_tar(void)
26429 tce_space = be64_to_cpu(readq(target));
26430 tce_space = tce_space & TAR_SW_BITS;
26431
26432- tce_space = tce_space & (~specified_table_size);
26433+ tce_space = tce_space & (~(unsigned long)specified_table_size);
26434 info->tce_space = (u64 *)__va(tce_space);
26435 }
26436 }
26437diff --git a/arch/x86/kernel/pci-iommu_table.c b/arch/x86/kernel/pci-iommu_table.c
26438index 35ccf75..7a15747 100644
26439--- a/arch/x86/kernel/pci-iommu_table.c
26440+++ b/arch/x86/kernel/pci-iommu_table.c
26441@@ -2,7 +2,7 @@
26442 #include <asm/iommu_table.h>
26443 #include <linux/string.h>
26444 #include <linux/kallsyms.h>
26445-
26446+#include <linux/sched.h>
26447
26448 #define DEBUG 1
26449
26450diff --git a/arch/x86/kernel/pci-swiotlb.c b/arch/x86/kernel/pci-swiotlb.c
26451index 77dd0ad..9ec4723 100644
26452--- a/arch/x86/kernel/pci-swiotlb.c
26453+++ b/arch/x86/kernel/pci-swiotlb.c
26454@@ -33,7 +33,7 @@ void x86_swiotlb_free_coherent(struct device *dev, size_t size,
26455 struct dma_attrs *attrs)
26456 {
26457 if (is_swiotlb_buffer(dma_to_phys(dev, dma_addr)))
26458- swiotlb_free_coherent(dev, size, vaddr, dma_addr);
26459+ swiotlb_free_coherent(dev, size, vaddr, dma_addr, attrs);
26460 else
26461 dma_generic_free_coherent(dev, size, vaddr, dma_addr, attrs);
26462 }
26463diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
26464index a388bb8..97064ad 100644
26465--- a/arch/x86/kernel/process.c
26466+++ b/arch/x86/kernel/process.c
26467@@ -38,7 +38,8 @@
26468 * section. Since TSS's are completely CPU-local, we want them
26469 * on exact cacheline boundaries, to eliminate cacheline ping-pong.
26470 */
26471-__visible DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss) = INIT_TSS;
26472+struct tss_struct init_tss[NR_CPUS] __visible ____cacheline_internodealigned_in_smp = { [0 ... NR_CPUS-1] = INIT_TSS };
26473+EXPORT_SYMBOL(init_tss);
26474
26475 #ifdef CONFIG_X86_64
26476 static DEFINE_PER_CPU(unsigned char, is_idle);
26477@@ -96,7 +97,7 @@ void arch_task_cache_init(void)
26478 task_xstate_cachep =
26479 kmem_cache_create("task_xstate", xstate_size,
26480 __alignof__(union thread_xstate),
26481- SLAB_PANIC | SLAB_NOTRACK, NULL);
26482+ SLAB_PANIC | SLAB_NOTRACK | SLAB_USERCOPY, NULL);
26483 setup_xstate_comp();
26484 }
26485
26486@@ -110,7 +111,7 @@ void exit_thread(void)
26487 unsigned long *bp = t->io_bitmap_ptr;
26488
26489 if (bp) {
26490- struct tss_struct *tss = &per_cpu(init_tss, get_cpu());
26491+ struct tss_struct *tss = init_tss + get_cpu();
26492
26493 t->io_bitmap_ptr = NULL;
26494 clear_thread_flag(TIF_IO_BITMAP);
26495@@ -130,6 +131,9 @@ void flush_thread(void)
26496 {
26497 struct task_struct *tsk = current;
26498
26499+#if defined(CONFIG_X86_32) && !defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_PAX_MEMORY_UDEREF)
26500+ loadsegment(gs, 0);
26501+#endif
26502 flush_ptrace_hw_breakpoint(tsk);
26503 memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));
26504 drop_init_fpu(tsk);
26505@@ -276,7 +280,7 @@ static void __exit_idle(void)
26506 void exit_idle(void)
26507 {
26508 /* idle loop has pid 0 */
26509- if (current->pid)
26510+ if (task_pid_nr(current))
26511 return;
26512 __exit_idle();
26513 }
26514@@ -329,7 +333,7 @@ bool xen_set_default_idle(void)
26515 return ret;
26516 }
26517 #endif
26518-void stop_this_cpu(void *dummy)
26519+__noreturn void stop_this_cpu(void *dummy)
26520 {
26521 local_irq_disable();
26522 /*
26523@@ -508,16 +512,37 @@ static int __init idle_setup(char *str)
26524 }
26525 early_param("idle", idle_setup);
26526
26527-unsigned long arch_align_stack(unsigned long sp)
26528+#ifdef CONFIG_PAX_RANDKSTACK
26529+void pax_randomize_kstack(struct pt_regs *regs)
26530 {
26531- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
26532- sp -= get_random_int() % 8192;
26533- return sp & ~0xf;
26534-}
26535+ struct thread_struct *thread = &current->thread;
26536+ unsigned long time;
26537
26538-unsigned long arch_randomize_brk(struct mm_struct *mm)
26539-{
26540- unsigned long range_end = mm->brk + 0x02000000;
26541- return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
26542-}
26543+ if (!randomize_va_space)
26544+ return;
26545+
26546+ if (v8086_mode(regs))
26547+ return;
26548
26549+ rdtscl(time);
26550+
26551+ /* P4 seems to return a 0 LSB, ignore it */
26552+#ifdef CONFIG_MPENTIUM4
26553+ time &= 0x3EUL;
26554+ time <<= 2;
26555+#elif defined(CONFIG_X86_64)
26556+ time &= 0xFUL;
26557+ time <<= 4;
26558+#else
26559+ time &= 0x1FUL;
26560+ time <<= 3;
26561+#endif
26562+
26563+ thread->sp0 ^= time;
26564+ load_sp0(init_tss + smp_processor_id(), thread);
26565+
26566+#ifdef CONFIG_X86_64
26567+ this_cpu_write(kernel_stack, thread->sp0);
26568+#endif
26569+}
26570+#endif
26571diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
26572index 603c4f9..3a105d7 100644
26573--- a/arch/x86/kernel/process_32.c
26574+++ b/arch/x86/kernel/process_32.c
26575@@ -64,6 +64,7 @@ asmlinkage void ret_from_kernel_thread(void) __asm__("ret_from_kernel_thread");
26576 unsigned long thread_saved_pc(struct task_struct *tsk)
26577 {
26578 return ((unsigned long *)tsk->thread.sp)[3];
26579+//XXX return tsk->thread.eip;
26580 }
26581
26582 void __show_regs(struct pt_regs *regs, int all)
26583@@ -73,19 +74,18 @@ void __show_regs(struct pt_regs *regs, int all)
26584 unsigned long sp;
26585 unsigned short ss, gs;
26586
26587- if (user_mode_vm(regs)) {
26588+ if (user_mode(regs)) {
26589 sp = regs->sp;
26590 ss = regs->ss & 0xffff;
26591- gs = get_user_gs(regs);
26592 } else {
26593 sp = kernel_stack_pointer(regs);
26594 savesegment(ss, ss);
26595- savesegment(gs, gs);
26596 }
26597+ gs = get_user_gs(regs);
26598
26599 printk(KERN_DEFAULT "EIP: %04x:[<%08lx>] EFLAGS: %08lx CPU: %d\n",
26600 (u16)regs->cs, regs->ip, regs->flags,
26601- smp_processor_id());
26602+ raw_smp_processor_id());
26603 print_symbol("EIP is at %s\n", regs->ip);
26604
26605 printk(KERN_DEFAULT "EAX: %08lx EBX: %08lx ECX: %08lx EDX: %08lx\n",
26606@@ -132,21 +132,22 @@ void release_thread(struct task_struct *dead_task)
26607 int copy_thread(unsigned long clone_flags, unsigned long sp,
26608 unsigned long arg, struct task_struct *p)
26609 {
26610- struct pt_regs *childregs = task_pt_regs(p);
26611+ struct pt_regs *childregs = task_stack_page(p) + THREAD_SIZE - sizeof(struct pt_regs) - 8;
26612 struct task_struct *tsk;
26613 int err;
26614
26615 p->thread.sp = (unsigned long) childregs;
26616 p->thread.sp0 = (unsigned long) (childregs+1);
26617+ p->tinfo.lowest_stack = (unsigned long)task_stack_page(p) + 2 * sizeof(unsigned long);
26618 memset(p->thread.ptrace_bps, 0, sizeof(p->thread.ptrace_bps));
26619
26620 if (unlikely(p->flags & PF_KTHREAD)) {
26621 /* kernel thread */
26622 memset(childregs, 0, sizeof(struct pt_regs));
26623 p->thread.ip = (unsigned long) ret_from_kernel_thread;
26624- task_user_gs(p) = __KERNEL_STACK_CANARY;
26625- childregs->ds = __USER_DS;
26626- childregs->es = __USER_DS;
26627+ savesegment(gs, childregs->gs);
26628+ childregs->ds = __KERNEL_DS;
26629+ childregs->es = __KERNEL_DS;
26630 childregs->fs = __KERNEL_PERCPU;
26631 childregs->bx = sp; /* function */
26632 childregs->bp = arg;
26633@@ -248,7 +249,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
26634 struct thread_struct *prev = &prev_p->thread,
26635 *next = &next_p->thread;
26636 int cpu = smp_processor_id();
26637- struct tss_struct *tss = &per_cpu(init_tss, cpu);
26638+ struct tss_struct *tss = init_tss + cpu;
26639 fpu_switch_t fpu;
26640
26641 /* never put a printk in __switch_to... printk() calls wake_up*() indirectly */
26642@@ -272,6 +273,10 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
26643 */
26644 lazy_save_gs(prev->gs);
26645
26646+#ifdef CONFIG_PAX_MEMORY_UDEREF
26647+ __set_fs(task_thread_info(next_p)->addr_limit);
26648+#endif
26649+
26650 /*
26651 * Load the per-thread Thread-Local Storage descriptor.
26652 */
26653@@ -310,9 +315,9 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
26654 */
26655 arch_end_context_switch(next_p);
26656
26657- this_cpu_write(kernel_stack,
26658- (unsigned long)task_stack_page(next_p) +
26659- THREAD_SIZE - KERNEL_STACK_OFFSET);
26660+ this_cpu_write(current_task, next_p);
26661+ this_cpu_write(current_tinfo, &next_p->tinfo);
26662+ this_cpu_write(kernel_stack, next->sp0);
26663
26664 /*
26665 * Restore %gs if needed (which is common)
26666@@ -322,8 +327,6 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
26667
26668 switch_fpu_finish(next_p, fpu);
26669
26670- this_cpu_write(current_task, next_p);
26671-
26672 return prev_p;
26673 }
26674
26675@@ -353,4 +356,3 @@ unsigned long get_wchan(struct task_struct *p)
26676 } while (count++ < 16);
26677 return 0;
26678 }
26679-
26680diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
26681index 67fcc43..0d2c630 100644
26682--- a/arch/x86/kernel/process_64.c
26683+++ b/arch/x86/kernel/process_64.c
26684@@ -158,10 +158,11 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
26685 struct pt_regs *childregs;
26686 struct task_struct *me = current;
26687
26688- p->thread.sp0 = (unsigned long)task_stack_page(p) + THREAD_SIZE;
26689+ p->thread.sp0 = (unsigned long)task_stack_page(p) + THREAD_SIZE - 16;
26690 childregs = task_pt_regs(p);
26691 p->thread.sp = (unsigned long) childregs;
26692 p->thread.usersp = me->thread.usersp;
26693+ p->tinfo.lowest_stack = (unsigned long)task_stack_page(p) + 2 * sizeof(unsigned long);
26694 set_tsk_thread_flag(p, TIF_FORK);
26695 p->thread.io_bitmap_ptr = NULL;
26696
26697@@ -171,6 +172,8 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
26698 p->thread.fs = p->thread.fsindex ? 0 : me->thread.fs;
26699 savesegment(es, p->thread.es);
26700 savesegment(ds, p->thread.ds);
26701+ savesegment(ss, p->thread.ss);
26702+ BUG_ON(p->thread.ss == __UDEREF_KERNEL_DS);
26703 memset(p->thread.ptrace_bps, 0, sizeof(p->thread.ptrace_bps));
26704
26705 if (unlikely(p->flags & PF_KTHREAD)) {
26706@@ -277,7 +280,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
26707 struct thread_struct *prev = &prev_p->thread;
26708 struct thread_struct *next = &next_p->thread;
26709 int cpu = smp_processor_id();
26710- struct tss_struct *tss = &per_cpu(init_tss, cpu);
26711+ struct tss_struct *tss = init_tss + cpu;
26712 unsigned fsindex, gsindex;
26713 fpu_switch_t fpu;
26714
26715@@ -331,6 +334,10 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
26716 if (unlikely(next->ds | prev->ds))
26717 loadsegment(ds, next->ds);
26718
26719+ savesegment(ss, prev->ss);
26720+ if (unlikely(next->ss != prev->ss))
26721+ loadsegment(ss, next->ss);
26722+
26723 /*
26724 * Switch FS and GS.
26725 *
26726@@ -404,6 +411,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
26727 prev->usersp = this_cpu_read(old_rsp);
26728 this_cpu_write(old_rsp, next->usersp);
26729 this_cpu_write(current_task, next_p);
26730+ this_cpu_write(current_tinfo, &next_p->tinfo);
26731
26732 /*
26733 * If it were not for PREEMPT_ACTIVE we could guarantee that the
26734@@ -413,9 +421,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
26735 task_thread_info(prev_p)->saved_preempt_count = this_cpu_read(__preempt_count);
26736 this_cpu_write(__preempt_count, task_thread_info(next_p)->saved_preempt_count);
26737
26738- this_cpu_write(kernel_stack,
26739- (unsigned long)task_stack_page(next_p) +
26740- THREAD_SIZE - KERNEL_STACK_OFFSET);
26741+ this_cpu_write(kernel_stack, next->sp0);
26742
26743 /*
26744 * Now maybe reload the debug registers and handle I/O bitmaps
26745@@ -485,12 +491,11 @@ unsigned long get_wchan(struct task_struct *p)
26746 if (!p || p == current || p->state == TASK_RUNNING)
26747 return 0;
26748 stack = (unsigned long)task_stack_page(p);
26749- if (p->thread.sp < stack || p->thread.sp >= stack+THREAD_SIZE)
26750+ if (p->thread.sp < stack || p->thread.sp > stack+THREAD_SIZE-16-sizeof(u64))
26751 return 0;
26752 fp = *(u64 *)(p->thread.sp);
26753 do {
26754- if (fp < (unsigned long)stack ||
26755- fp >= (unsigned long)stack+THREAD_SIZE)
26756+ if (fp < stack || fp > stack+THREAD_SIZE-16-sizeof(u64))
26757 return 0;
26758 ip = *(u64 *)(fp+8);
26759 if (!in_sched_functions(ip))
26760diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c
26761index e510618..5165ac0 100644
26762--- a/arch/x86/kernel/ptrace.c
26763+++ b/arch/x86/kernel/ptrace.c
26764@@ -186,10 +186,10 @@ unsigned long kernel_stack_pointer(struct pt_regs *regs)
26765 unsigned long sp = (unsigned long)&regs->sp;
26766 u32 *prev_esp;
26767
26768- if (context == (sp & ~(THREAD_SIZE - 1)))
26769+ if (context == ((sp + 8) & ~(THREAD_SIZE - 1)))
26770 return sp;
26771
26772- prev_esp = (u32 *)(context);
26773+ prev_esp = *(u32 **)(context);
26774 if (prev_esp)
26775 return (unsigned long)prev_esp;
26776
26777@@ -452,6 +452,20 @@ static int putreg(struct task_struct *child,
26778 if (child->thread.gs != value)
26779 return do_arch_prctl(child, ARCH_SET_GS, value);
26780 return 0;
26781+
26782+ case offsetof(struct user_regs_struct,ip):
26783+ /*
26784+ * Protect against any attempt to set ip to an
26785+ * impossible address. There are dragons lurking if the
26786+ * address is noncanonical. (This explicitly allows
26787+ * setting ip to TASK_SIZE_MAX, because user code can do
26788+ * that all by itself by running off the end of its
26789+ * address space.
26790+ */
26791+ if (value > TASK_SIZE_MAX)
26792+ return -EIO;
26793+ break;
26794+
26795 #endif
26796 }
26797
26798@@ -588,7 +602,7 @@ static void ptrace_triggered(struct perf_event *bp,
26799 static unsigned long ptrace_get_dr7(struct perf_event *bp[])
26800 {
26801 int i;
26802- int dr7 = 0;
26803+ unsigned long dr7 = 0;
26804 struct arch_hw_breakpoint *info;
26805
26806 for (i = 0; i < HBP_NUM; i++) {
26807@@ -822,7 +836,7 @@ long arch_ptrace(struct task_struct *child, long request,
26808 unsigned long addr, unsigned long data)
26809 {
26810 int ret;
26811- unsigned long __user *datap = (unsigned long __user *)data;
26812+ unsigned long __user *datap = (__force unsigned long __user *)data;
26813
26814 switch (request) {
26815 /* read the word at location addr in the USER area. */
26816@@ -907,14 +921,14 @@ long arch_ptrace(struct task_struct *child, long request,
26817 if ((int) addr < 0)
26818 return -EIO;
26819 ret = do_get_thread_area(child, addr,
26820- (struct user_desc __user *)data);
26821+ (__force struct user_desc __user *) data);
26822 break;
26823
26824 case PTRACE_SET_THREAD_AREA:
26825 if ((int) addr < 0)
26826 return -EIO;
26827 ret = do_set_thread_area(child, addr,
26828- (struct user_desc __user *)data, 0);
26829+ (__force struct user_desc __user *) data, 0);
26830 break;
26831 #endif
26832
26833@@ -1292,7 +1306,7 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
26834
26835 #ifdef CONFIG_X86_64
26836
26837-static struct user_regset x86_64_regsets[] __read_mostly = {
26838+static user_regset_no_const x86_64_regsets[] __read_only = {
26839 [REGSET_GENERAL] = {
26840 .core_note_type = NT_PRSTATUS,
26841 .n = sizeof(struct user_regs_struct) / sizeof(long),
26842@@ -1333,7 +1347,7 @@ static const struct user_regset_view user_x86_64_view = {
26843 #endif /* CONFIG_X86_64 */
26844
26845 #if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION
26846-static struct user_regset x86_32_regsets[] __read_mostly = {
26847+static user_regset_no_const x86_32_regsets[] __read_only = {
26848 [REGSET_GENERAL] = {
26849 .core_note_type = NT_PRSTATUS,
26850 .n = sizeof(struct user_regs_struct32) / sizeof(u32),
26851@@ -1386,7 +1400,7 @@ static const struct user_regset_view user_x86_32_view = {
26852 */
26853 u64 xstate_fx_sw_bytes[USER_XSTATE_FX_SW_WORDS];
26854
26855-void update_regset_xstate_info(unsigned int size, u64 xstate_mask)
26856+void __init update_regset_xstate_info(unsigned int size, u64 xstate_mask)
26857 {
26858 #ifdef CONFIG_X86_64
26859 x86_64_regsets[REGSET_XSTATE].n = size / sizeof(u64);
26860@@ -1421,7 +1435,7 @@ static void fill_sigtrap_info(struct task_struct *tsk,
26861 memset(info, 0, sizeof(*info));
26862 info->si_signo = SIGTRAP;
26863 info->si_code = si_code;
26864- info->si_addr = user_mode_vm(regs) ? (void __user *)regs->ip : NULL;
26865+ info->si_addr = user_mode(regs) ? (__force void __user *)regs->ip : NULL;
26866 }
26867
26868 void user_single_step_siginfo(struct task_struct *tsk,
26869@@ -1455,6 +1469,10 @@ static void do_audit_syscall_entry(struct pt_regs *regs, u32 arch)
26870 }
26871 }
26872
26873+#ifdef CONFIG_GRKERNSEC_SETXID
26874+extern void gr_delayed_cred_worker(void);
26875+#endif
26876+
26877 /*
26878 * We can return 0 to resume the syscall or anything else to go to phase
26879 * 2. If we resume the syscall, we need to put something appropriate in
26880@@ -1562,6 +1580,11 @@ long syscall_trace_enter_phase2(struct pt_regs *regs, u32 arch,
26881
26882 BUG_ON(regs != task_pt_regs(current));
26883
26884+#ifdef CONFIG_GRKERNSEC_SETXID
26885+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
26886+ gr_delayed_cred_worker();
26887+#endif
26888+
26889 /*
26890 * If we stepped into a sysenter/syscall insn, it trapped in
26891 * kernel mode; do_debug() cleared TF and set TIF_SINGLESTEP.
26892@@ -1620,6 +1643,11 @@ void syscall_trace_leave(struct pt_regs *regs)
26893 */
26894 user_exit();
26895
26896+#ifdef CONFIG_GRKERNSEC_SETXID
26897+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
26898+ gr_delayed_cred_worker();
26899+#endif
26900+
26901 audit_syscall_exit(regs);
26902
26903 if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
26904diff --git a/arch/x86/kernel/pvclock.c b/arch/x86/kernel/pvclock.c
26905index e5ecd20..60f7eef 100644
26906--- a/arch/x86/kernel/pvclock.c
26907+++ b/arch/x86/kernel/pvclock.c
26908@@ -51,11 +51,11 @@ void pvclock_touch_watchdogs(void)
26909 reset_hung_task_detector();
26910 }
26911
26912-static atomic64_t last_value = ATOMIC64_INIT(0);
26913+static atomic64_unchecked_t last_value = ATOMIC64_INIT(0);
26914
26915 void pvclock_resume(void)
26916 {
26917- atomic64_set(&last_value, 0);
26918+ atomic64_set_unchecked(&last_value, 0);
26919 }
26920
26921 u8 pvclock_read_flags(struct pvclock_vcpu_time_info *src)
26922@@ -105,11 +105,11 @@ cycle_t pvclock_clocksource_read(struct pvclock_vcpu_time_info *src)
26923 * updating at the same time, and one of them could be slightly behind,
26924 * making the assumption that last_value always go forward fail to hold.
26925 */
26926- last = atomic64_read(&last_value);
26927+ last = atomic64_read_unchecked(&last_value);
26928 do {
26929 if (ret < last)
26930 return last;
26931- last = atomic64_cmpxchg(&last_value, last, ret);
26932+ last = atomic64_cmpxchg_unchecked(&last_value, last, ret);
26933 } while (unlikely(last != ret));
26934
26935 return ret;
26936diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
26937index 86db4bc..a50a54a 100644
26938--- a/arch/x86/kernel/reboot.c
26939+++ b/arch/x86/kernel/reboot.c
26940@@ -70,6 +70,11 @@ static int __init set_bios_reboot(const struct dmi_system_id *d)
26941
26942 void __noreturn machine_real_restart(unsigned int type)
26943 {
26944+
26945+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF))
26946+ struct desc_struct *gdt;
26947+#endif
26948+
26949 local_irq_disable();
26950
26951 /*
26952@@ -97,7 +102,29 @@ void __noreturn machine_real_restart(unsigned int type)
26953
26954 /* Jump to the identity-mapped low memory code */
26955 #ifdef CONFIG_X86_32
26956- asm volatile("jmpl *%0" : :
26957+
26958+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
26959+ gdt = get_cpu_gdt_table(smp_processor_id());
26960+ pax_open_kernel();
26961+#ifdef CONFIG_PAX_MEMORY_UDEREF
26962+ gdt[GDT_ENTRY_KERNEL_DS].type = 3;
26963+ gdt[GDT_ENTRY_KERNEL_DS].limit = 0xf;
26964+ loadsegment(ds, __KERNEL_DS);
26965+ loadsegment(es, __KERNEL_DS);
26966+ loadsegment(ss, __KERNEL_DS);
26967+#endif
26968+#ifdef CONFIG_PAX_KERNEXEC
26969+ gdt[GDT_ENTRY_KERNEL_CS].base0 = 0;
26970+ gdt[GDT_ENTRY_KERNEL_CS].base1 = 0;
26971+ gdt[GDT_ENTRY_KERNEL_CS].base2 = 0;
26972+ gdt[GDT_ENTRY_KERNEL_CS].limit0 = 0xffff;
26973+ gdt[GDT_ENTRY_KERNEL_CS].limit = 0xf;
26974+ gdt[GDT_ENTRY_KERNEL_CS].g = 1;
26975+#endif
26976+ pax_close_kernel();
26977+#endif
26978+
26979+ asm volatile("ljmpl *%0" : :
26980 "rm" (real_mode_header->machine_real_restart_asm),
26981 "a" (type));
26982 #else
26983@@ -137,7 +164,7 @@ static int __init set_kbd_reboot(const struct dmi_system_id *d)
26984 /*
26985 * This is a single dmi_table handling all reboot quirks.
26986 */
26987-static struct dmi_system_id __initdata reboot_dmi_table[] = {
26988+static const struct dmi_system_id __initconst reboot_dmi_table[] = {
26989
26990 /* Acer */
26991 { /* Handle reboot issue on Acer Aspire one */
26992@@ -511,7 +538,7 @@ void __attribute__((weak)) mach_reboot_fixups(void)
26993 * This means that this function can never return, it can misbehave
26994 * by not rebooting properly and hanging.
26995 */
26996-static void native_machine_emergency_restart(void)
26997+static void __noreturn native_machine_emergency_restart(void)
26998 {
26999 int i;
27000 int attempt = 0;
27001@@ -631,13 +658,13 @@ void native_machine_shutdown(void)
27002 #endif
27003 }
27004
27005-static void __machine_emergency_restart(int emergency)
27006+static void __noreturn __machine_emergency_restart(int emergency)
27007 {
27008 reboot_emergency = emergency;
27009 machine_ops.emergency_restart();
27010 }
27011
27012-static void native_machine_restart(char *__unused)
27013+static void __noreturn native_machine_restart(char *__unused)
27014 {
27015 pr_notice("machine restart\n");
27016
27017@@ -646,7 +673,7 @@ static void native_machine_restart(char *__unused)
27018 __machine_emergency_restart(0);
27019 }
27020
27021-static void native_machine_halt(void)
27022+static void __noreturn native_machine_halt(void)
27023 {
27024 /* Stop other cpus and apics */
27025 machine_shutdown();
27026@@ -656,7 +683,7 @@ static void native_machine_halt(void)
27027 stop_this_cpu(NULL);
27028 }
27029
27030-static void native_machine_power_off(void)
27031+static void __noreturn native_machine_power_off(void)
27032 {
27033 if (pm_power_off) {
27034 if (!reboot_force)
27035@@ -665,9 +692,10 @@ static void native_machine_power_off(void)
27036 }
27037 /* A fallback in case there is no PM info available */
27038 tboot_shutdown(TB_SHUTDOWN_HALT);
27039+ unreachable();
27040 }
27041
27042-struct machine_ops machine_ops = {
27043+struct machine_ops machine_ops __read_only = {
27044 .power_off = native_machine_power_off,
27045 .shutdown = native_machine_shutdown,
27046 .emergency_restart = native_machine_emergency_restart,
27047diff --git a/arch/x86/kernel/reboot_fixups_32.c b/arch/x86/kernel/reboot_fixups_32.c
27048index c8e41e9..64049ef 100644
27049--- a/arch/x86/kernel/reboot_fixups_32.c
27050+++ b/arch/x86/kernel/reboot_fixups_32.c
27051@@ -57,7 +57,7 @@ struct device_fixup {
27052 unsigned int vendor;
27053 unsigned int device;
27054 void (*reboot_fixup)(struct pci_dev *);
27055-};
27056+} __do_const;
27057
27058 /*
27059 * PCI ids solely used for fixups_table go here
27060diff --git a/arch/x86/kernel/relocate_kernel_64.S b/arch/x86/kernel/relocate_kernel_64.S
27061index 3fd2c69..a444264 100644
27062--- a/arch/x86/kernel/relocate_kernel_64.S
27063+++ b/arch/x86/kernel/relocate_kernel_64.S
27064@@ -96,8 +96,7 @@ relocate_kernel:
27065
27066 /* jump to identity mapped page */
27067 addq $(identity_mapped - relocate_kernel), %r8
27068- pushq %r8
27069- ret
27070+ jmp *%r8
27071
27072 identity_mapped:
27073 /* set return address to 0 if not preserving context */
27074diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
27075index 0a2421c..11f3f36 100644
27076--- a/arch/x86/kernel/setup.c
27077+++ b/arch/x86/kernel/setup.c
27078@@ -111,6 +111,7 @@
27079 #include <asm/mce.h>
27080 #include <asm/alternative.h>
27081 #include <asm/prom.h>
27082+#include <asm/boot.h>
27083
27084 /*
27085 * max_low_pfn_mapped: highest direct mapped pfn under 4GB
27086@@ -206,10 +207,12 @@ EXPORT_SYMBOL(boot_cpu_data);
27087 #endif
27088
27089
27090-#if !defined(CONFIG_X86_PAE) || defined(CONFIG_X86_64)
27091-__visible unsigned long mmu_cr4_features;
27092+#ifdef CONFIG_X86_64
27093+__visible unsigned long mmu_cr4_features __read_only = X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE;
27094+#elif defined(CONFIG_X86_PAE)
27095+__visible unsigned long mmu_cr4_features __read_only = X86_CR4_PAE;
27096 #else
27097-__visible unsigned long mmu_cr4_features = X86_CR4_PAE;
27098+__visible unsigned long mmu_cr4_features __read_only;
27099 #endif
27100
27101 /* Boot loader ID and version as integers, for the benefit of proc_dointvec */
27102@@ -771,7 +774,7 @@ static void __init trim_bios_range(void)
27103 * area (640->1Mb) as ram even though it is not.
27104 * take them out.
27105 */
27106- e820_remove_range(BIOS_BEGIN, BIOS_END - BIOS_BEGIN, E820_RAM, 1);
27107+ e820_remove_range(ISA_START_ADDRESS, ISA_END_ADDRESS - ISA_START_ADDRESS, E820_RAM, 1);
27108
27109 sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);
27110 }
27111@@ -779,7 +782,7 @@ static void __init trim_bios_range(void)
27112 /* called before trim_bios_range() to spare extra sanitize */
27113 static void __init e820_add_kernel_range(void)
27114 {
27115- u64 start = __pa_symbol(_text);
27116+ u64 start = __pa_symbol(ktla_ktva(_text));
27117 u64 size = __pa_symbol(_end) - start;
27118
27119 /*
27120@@ -855,8 +858,12 @@ dump_kernel_offset(struct notifier_block *self, unsigned long v, void *p)
27121
27122 void __init setup_arch(char **cmdline_p)
27123 {
27124+#ifdef CONFIG_X86_32
27125+ memblock_reserve(LOAD_PHYSICAL_ADDR, __pa_symbol(__bss_stop) - LOAD_PHYSICAL_ADDR);
27126+#else
27127 memblock_reserve(__pa_symbol(_text),
27128 (unsigned long)__bss_stop - (unsigned long)_text);
27129+#endif
27130
27131 early_reserve_initrd();
27132
27133@@ -954,16 +961,16 @@ void __init setup_arch(char **cmdline_p)
27134
27135 if (!boot_params.hdr.root_flags)
27136 root_mountflags &= ~MS_RDONLY;
27137- init_mm.start_code = (unsigned long) _text;
27138- init_mm.end_code = (unsigned long) _etext;
27139+ init_mm.start_code = ktla_ktva((unsigned long) _text);
27140+ init_mm.end_code = ktla_ktva((unsigned long) _etext);
27141 init_mm.end_data = (unsigned long) _edata;
27142 init_mm.brk = _brk_end;
27143
27144 mpx_mm_init(&init_mm);
27145
27146- code_resource.start = __pa_symbol(_text);
27147- code_resource.end = __pa_symbol(_etext)-1;
27148- data_resource.start = __pa_symbol(_etext);
27149+ code_resource.start = __pa_symbol(ktla_ktva(_text));
27150+ code_resource.end = __pa_symbol(ktla_ktva(_etext))-1;
27151+ data_resource.start = __pa_symbol(_sdata);
27152 data_resource.end = __pa_symbol(_edata)-1;
27153 bss_resource.start = __pa_symbol(__bss_start);
27154 bss_resource.end = __pa_symbol(__bss_stop)-1;
27155diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c
27156index e4fcb87..9c06c55 100644
27157--- a/arch/x86/kernel/setup_percpu.c
27158+++ b/arch/x86/kernel/setup_percpu.c
27159@@ -21,19 +21,17 @@
27160 #include <asm/cpu.h>
27161 #include <asm/stackprotector.h>
27162
27163-DEFINE_PER_CPU_READ_MOSTLY(int, cpu_number);
27164+#ifdef CONFIG_SMP
27165+DEFINE_PER_CPU_READ_MOSTLY(unsigned int, cpu_number);
27166 EXPORT_PER_CPU_SYMBOL(cpu_number);
27167+#endif
27168
27169-#ifdef CONFIG_X86_64
27170 #define BOOT_PERCPU_OFFSET ((unsigned long)__per_cpu_load)
27171-#else
27172-#define BOOT_PERCPU_OFFSET 0
27173-#endif
27174
27175 DEFINE_PER_CPU_READ_MOSTLY(unsigned long, this_cpu_off) = BOOT_PERCPU_OFFSET;
27176 EXPORT_PER_CPU_SYMBOL(this_cpu_off);
27177
27178-unsigned long __per_cpu_offset[NR_CPUS] __read_mostly = {
27179+unsigned long __per_cpu_offset[NR_CPUS] __read_only = {
27180 [0 ... NR_CPUS-1] = BOOT_PERCPU_OFFSET,
27181 };
27182 EXPORT_SYMBOL(__per_cpu_offset);
27183@@ -66,7 +64,7 @@ static bool __init pcpu_need_numa(void)
27184 {
27185 #ifdef CONFIG_NEED_MULTIPLE_NODES
27186 pg_data_t *last = NULL;
27187- unsigned int cpu;
27188+ int cpu;
27189
27190 for_each_possible_cpu(cpu) {
27191 int node = early_cpu_to_node(cpu);
27192@@ -155,10 +153,10 @@ static inline void setup_percpu_segment(int cpu)
27193 {
27194 #ifdef CONFIG_X86_32
27195 struct desc_struct gdt;
27196+ unsigned long base = per_cpu_offset(cpu);
27197
27198- pack_descriptor(&gdt, per_cpu_offset(cpu), 0xFFFFF,
27199- 0x2 | DESCTYPE_S, 0x8);
27200- gdt.s = 1;
27201+ pack_descriptor(&gdt, base, (VMALLOC_END - base - 1) >> PAGE_SHIFT,
27202+ 0x83 | DESCTYPE_S, 0xC);
27203 write_gdt_entry(get_cpu_gdt_table(cpu),
27204 GDT_ENTRY_PERCPU, &gdt, DESCTYPE_S);
27205 #endif
27206@@ -219,6 +217,11 @@ void __init setup_per_cpu_areas(void)
27207 /* alrighty, percpu areas up and running */
27208 delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
27209 for_each_possible_cpu(cpu) {
27210+#ifdef CONFIG_CC_STACKPROTECTOR
27211+#ifdef CONFIG_X86_32
27212+ unsigned long canary = per_cpu(stack_canary.canary, cpu);
27213+#endif
27214+#endif
27215 per_cpu_offset(cpu) = delta + pcpu_unit_offsets[cpu];
27216 per_cpu(this_cpu_off, cpu) = per_cpu_offset(cpu);
27217 per_cpu(cpu_number, cpu) = cpu;
27218@@ -259,6 +262,12 @@ void __init setup_per_cpu_areas(void)
27219 */
27220 set_cpu_numa_node(cpu, early_cpu_to_node(cpu));
27221 #endif
27222+#ifdef CONFIG_CC_STACKPROTECTOR
27223+#ifdef CONFIG_X86_32
27224+ if (!cpu)
27225+ per_cpu(stack_canary.canary, cpu) = canary;
27226+#endif
27227+#endif
27228 /*
27229 * Up to this point, the boot CPU has been using .init.data
27230 * area. Reload any changed state for the boot CPU.
27231diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c
27232index e504246..ba10432 100644
27233--- a/arch/x86/kernel/signal.c
27234+++ b/arch/x86/kernel/signal.c
27235@@ -190,7 +190,7 @@ static unsigned long align_sigframe(unsigned long sp)
27236 * Align the stack pointer according to the i386 ABI,
27237 * i.e. so that on function entry ((sp + 4) & 15) == 0.
27238 */
27239- sp = ((sp + 4) & -16ul) - 4;
27240+ sp = ((sp - 12) & -16ul) - 4;
27241 #else /* !CONFIG_X86_32 */
27242 sp = round_down(sp, 16) - 8;
27243 #endif
27244@@ -298,10 +298,9 @@ __setup_frame(int sig, struct ksignal *ksig, sigset_t *set,
27245 }
27246
27247 if (current->mm->context.vdso)
27248- restorer = current->mm->context.vdso +
27249- selected_vdso32->sym___kernel_sigreturn;
27250+ restorer = (void __force_user *)(current->mm->context.vdso + selected_vdso32->sym___kernel_sigreturn);
27251 else
27252- restorer = &frame->retcode;
27253+ restorer = (void __user *)&frame->retcode;
27254 if (ksig->ka.sa.sa_flags & SA_RESTORER)
27255 restorer = ksig->ka.sa.sa_restorer;
27256
27257@@ -315,7 +314,7 @@ __setup_frame(int sig, struct ksignal *ksig, sigset_t *set,
27258 * reasons and because gdb uses it as a signature to notice
27259 * signal handler stack frames.
27260 */
27261- err |= __put_user(*((u64 *)&retcode), (u64 *)frame->retcode);
27262+ err |= __put_user(*((u64 *)&retcode), (u64 __user *)frame->retcode);
27263
27264 if (err)
27265 return -EFAULT;
27266@@ -362,8 +361,10 @@ static int __setup_rt_frame(int sig, struct ksignal *ksig,
27267 save_altstack_ex(&frame->uc.uc_stack, regs->sp);
27268
27269 /* Set up to return from userspace. */
27270- restorer = current->mm->context.vdso +
27271- selected_vdso32->sym___kernel_rt_sigreturn;
27272+ if (current->mm->context.vdso)
27273+ restorer = (void __force_user *)(current->mm->context.vdso + selected_vdso32->sym___kernel_rt_sigreturn);
27274+ else
27275+ restorer = (void __user *)&frame->retcode;
27276 if (ksig->ka.sa.sa_flags & SA_RESTORER)
27277 restorer = ksig->ka.sa.sa_restorer;
27278 put_user_ex(restorer, &frame->pretcode);
27279@@ -375,7 +376,7 @@ static int __setup_rt_frame(int sig, struct ksignal *ksig,
27280 * reasons and because gdb uses it as a signature to notice
27281 * signal handler stack frames.
27282 */
27283- put_user_ex(*((u64 *)&rt_retcode), (u64 *)frame->retcode);
27284+ put_user_ex(*((u64 *)&rt_retcode), (u64 __user *)frame->retcode);
27285 } put_user_catch(err);
27286
27287 err |= copy_siginfo_to_user(&frame->info, &ksig->info);
27288@@ -611,7 +612,12 @@ setup_rt_frame(struct ksignal *ksig, struct pt_regs *regs)
27289 {
27290 int usig = signr_convert(ksig->sig);
27291 sigset_t *set = sigmask_to_save();
27292- compat_sigset_t *cset = (compat_sigset_t *) set;
27293+ sigset_t sigcopy;
27294+ compat_sigset_t *cset;
27295+
27296+ sigcopy = *set;
27297+
27298+ cset = (compat_sigset_t *) &sigcopy;
27299
27300 /* Set up the stack frame */
27301 if (is_ia32_frame()) {
27302@@ -622,7 +628,7 @@ setup_rt_frame(struct ksignal *ksig, struct pt_regs *regs)
27303 } else if (is_x32_frame()) {
27304 return x32_setup_rt_frame(ksig, cset, regs);
27305 } else {
27306- return __setup_rt_frame(ksig->sig, ksig, set, regs);
27307+ return __setup_rt_frame(ksig->sig, ksig, &sigcopy, regs);
27308 }
27309 }
27310
27311diff --git a/arch/x86/kernel/smp.c b/arch/x86/kernel/smp.c
27312index be8e1bd..a3d93fa 100644
27313--- a/arch/x86/kernel/smp.c
27314+++ b/arch/x86/kernel/smp.c
27315@@ -341,7 +341,7 @@ static int __init nonmi_ipi_setup(char *str)
27316
27317 __setup("nonmi_ipi", nonmi_ipi_setup);
27318
27319-struct smp_ops smp_ops = {
27320+struct smp_ops smp_ops __read_only = {
27321 .smp_prepare_boot_cpu = native_smp_prepare_boot_cpu,
27322 .smp_prepare_cpus = native_smp_prepare_cpus,
27323 .smp_cpus_done = native_smp_cpus_done,
27324diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
27325index febc6aa..37d8edf 100644
27326--- a/arch/x86/kernel/smpboot.c
27327+++ b/arch/x86/kernel/smpboot.c
27328@@ -229,14 +229,17 @@ static void notrace start_secondary(void *unused)
27329
27330 enable_start_cpu0 = 0;
27331
27332-#ifdef CONFIG_X86_32
27333+ /* otherwise gcc will move up smp_processor_id before the cpu_init */
27334+ barrier();
27335+
27336 /* switch away from the initial page table */
27337+#ifdef CONFIG_PAX_PER_CPU_PGD
27338+ load_cr3(get_cpu_pgd(smp_processor_id(), kernel));
27339+#else
27340 load_cr3(swapper_pg_dir);
27341+#endif
27342 __flush_tlb_all();
27343-#endif
27344
27345- /* otherwise gcc will move up smp_processor_id before the cpu_init */
27346- barrier();
27347 /*
27348 * Check TSC synchronization with the BP:
27349 */
27350@@ -800,8 +803,9 @@ static int do_boot_cpu(int apicid, int cpu, struct task_struct *idle)
27351 alternatives_enable_smp();
27352
27353 idle->thread.sp = (unsigned long) (((struct pt_regs *)
27354- (THREAD_SIZE + task_stack_page(idle))) - 1);
27355+ (THREAD_SIZE - 16 + task_stack_page(idle))) - 1);
27356 per_cpu(current_task, cpu) = idle;
27357+ per_cpu(current_tinfo, cpu) = &idle->tinfo;
27358
27359 #ifdef CONFIG_X86_32
27360 /* Stack for startup_32 can be just as for start_secondary onwards */
27361@@ -810,10 +814,10 @@ static int do_boot_cpu(int apicid, int cpu, struct task_struct *idle)
27362 clear_tsk_thread_flag(idle, TIF_FORK);
27363 initial_gs = per_cpu_offset(cpu);
27364 #endif
27365- per_cpu(kernel_stack, cpu) =
27366- (unsigned long)task_stack_page(idle) -
27367- KERNEL_STACK_OFFSET + THREAD_SIZE;
27368+ per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(idle) - 16 + THREAD_SIZE;
27369+ pax_open_kernel();
27370 early_gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
27371+ pax_close_kernel();
27372 initial_code = (unsigned long)start_secondary;
27373 stack_start = idle->thread.sp;
27374
27375@@ -953,6 +957,15 @@ int native_cpu_up(unsigned int cpu, struct task_struct *tidle)
27376 /* the FPU context is blank, nobody can own it */
27377 __cpu_disable_lazy_restore(cpu);
27378
27379+#ifdef CONFIG_PAX_PER_CPU_PGD
27380+ clone_pgd_range(get_cpu_pgd(cpu, kernel) + KERNEL_PGD_BOUNDARY,
27381+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
27382+ KERNEL_PGD_PTRS);
27383+ clone_pgd_range(get_cpu_pgd(cpu, user) + KERNEL_PGD_BOUNDARY,
27384+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
27385+ KERNEL_PGD_PTRS);
27386+#endif
27387+
27388 err = do_boot_cpu(apicid, cpu, tidle);
27389 if (err) {
27390 pr_err("do_boot_cpu failed(%d) to wakeup CPU#%u\n", err, cpu);
27391diff --git a/arch/x86/kernel/step.c b/arch/x86/kernel/step.c
27392index 9b4d51d..5d28b58 100644
27393--- a/arch/x86/kernel/step.c
27394+++ b/arch/x86/kernel/step.c
27395@@ -27,10 +27,10 @@ unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *re
27396 struct desc_struct *desc;
27397 unsigned long base;
27398
27399- seg &= ~7UL;
27400+ seg >>= 3;
27401
27402 mutex_lock(&child->mm->context.lock);
27403- if (unlikely((seg >> 3) >= child->mm->context.size))
27404+ if (unlikely(seg >= child->mm->context.size))
27405 addr = -1L; /* bogus selector, access would fault */
27406 else {
27407 desc = child->mm->context.ldt + seg;
27408@@ -42,7 +42,8 @@ unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *re
27409 addr += base;
27410 }
27411 mutex_unlock(&child->mm->context.lock);
27412- }
27413+ } else if (seg == __KERNEL_CS || seg == __KERNEXEC_KERNEL_CS)
27414+ addr = ktla_ktva(addr);
27415
27416 return addr;
27417 }
27418@@ -53,6 +54,9 @@ static int is_setting_trap_flag(struct task_struct *child, struct pt_regs *regs)
27419 unsigned char opcode[15];
27420 unsigned long addr = convert_ip_to_linear(child, regs);
27421
27422+ if (addr == -EINVAL)
27423+ return 0;
27424+
27425 copied = access_process_vm(child, addr, opcode, sizeof(opcode), 0);
27426 for (i = 0; i < copied; i++) {
27427 switch (opcode[i]) {
27428diff --git a/arch/x86/kernel/sys_i386_32.c b/arch/x86/kernel/sys_i386_32.c
27429new file mode 100644
27430index 0000000..5877189
27431--- /dev/null
27432+++ b/arch/x86/kernel/sys_i386_32.c
27433@@ -0,0 +1,189 @@
27434+/*
27435+ * This file contains various random system calls that
27436+ * have a non-standard calling sequence on the Linux/i386
27437+ * platform.
27438+ */
27439+
27440+#include <linux/errno.h>
27441+#include <linux/sched.h>
27442+#include <linux/mm.h>
27443+#include <linux/fs.h>
27444+#include <linux/smp.h>
27445+#include <linux/sem.h>
27446+#include <linux/msg.h>
27447+#include <linux/shm.h>
27448+#include <linux/stat.h>
27449+#include <linux/syscalls.h>
27450+#include <linux/mman.h>
27451+#include <linux/file.h>
27452+#include <linux/utsname.h>
27453+#include <linux/ipc.h>
27454+#include <linux/elf.h>
27455+
27456+#include <linux/uaccess.h>
27457+#include <linux/unistd.h>
27458+
27459+#include <asm/syscalls.h>
27460+
27461+int i386_mmap_check(unsigned long addr, unsigned long len, unsigned long flags)
27462+{
27463+ unsigned long pax_task_size = TASK_SIZE;
27464+
27465+#ifdef CONFIG_PAX_SEGMEXEC
27466+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
27467+ pax_task_size = SEGMEXEC_TASK_SIZE;
27468+#endif
27469+
27470+ if (flags & MAP_FIXED)
27471+ if (len > pax_task_size || addr > pax_task_size - len)
27472+ return -EINVAL;
27473+
27474+ return 0;
27475+}
27476+
27477+/*
27478+ * Align a virtual address to avoid aliasing in the I$ on AMD F15h.
27479+ */
27480+static unsigned long get_align_mask(void)
27481+{
27482+ if (va_align.flags < 0 || !(va_align.flags & ALIGN_VA_32))
27483+ return 0;
27484+
27485+ if (!(current->flags & PF_RANDOMIZE))
27486+ return 0;
27487+
27488+ return va_align.mask;
27489+}
27490+
27491+unsigned long
27492+arch_get_unmapped_area(struct file *filp, unsigned long addr,
27493+ unsigned long len, unsigned long pgoff, unsigned long flags)
27494+{
27495+ struct mm_struct *mm = current->mm;
27496+ struct vm_area_struct *vma;
27497+ unsigned long pax_task_size = TASK_SIZE;
27498+ struct vm_unmapped_area_info info;
27499+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
27500+
27501+#ifdef CONFIG_PAX_SEGMEXEC
27502+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
27503+ pax_task_size = SEGMEXEC_TASK_SIZE;
27504+#endif
27505+
27506+ pax_task_size -= PAGE_SIZE;
27507+
27508+ if (len > pax_task_size)
27509+ return -ENOMEM;
27510+
27511+ if (flags & MAP_FIXED)
27512+ return addr;
27513+
27514+#ifdef CONFIG_PAX_RANDMMAP
27515+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
27516+#endif
27517+
27518+ if (addr) {
27519+ addr = PAGE_ALIGN(addr);
27520+ if (pax_task_size - len >= addr) {
27521+ vma = find_vma(mm, addr);
27522+ if (check_heap_stack_gap(vma, addr, len, offset))
27523+ return addr;
27524+ }
27525+ }
27526+
27527+ info.flags = 0;
27528+ info.length = len;
27529+ info.align_mask = filp ? get_align_mask() : 0;
27530+ info.align_offset = pgoff << PAGE_SHIFT;
27531+ info.threadstack_offset = offset;
27532+
27533+#ifdef CONFIG_PAX_PAGEEXEC
27534+ if (!(__supported_pte_mask & _PAGE_NX) && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE)) {
27535+ info.low_limit = 0x00110000UL;
27536+ info.high_limit = mm->start_code;
27537+
27538+#ifdef CONFIG_PAX_RANDMMAP
27539+ if (mm->pax_flags & MF_PAX_RANDMMAP)
27540+ info.low_limit += mm->delta_mmap & 0x03FFF000UL;
27541+#endif
27542+
27543+ if (info.low_limit < info.high_limit) {
27544+ addr = vm_unmapped_area(&info);
27545+ if (!IS_ERR_VALUE(addr))
27546+ return addr;
27547+ }
27548+ } else
27549+#endif
27550+
27551+ info.low_limit = mm->mmap_base;
27552+ info.high_limit = pax_task_size;
27553+
27554+ return vm_unmapped_area(&info);
27555+}
27556+
27557+unsigned long
27558+arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
27559+ const unsigned long len, const unsigned long pgoff,
27560+ const unsigned long flags)
27561+{
27562+ struct vm_area_struct *vma;
27563+ struct mm_struct *mm = current->mm;
27564+ unsigned long addr = addr0, pax_task_size = TASK_SIZE;
27565+ struct vm_unmapped_area_info info;
27566+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
27567+
27568+#ifdef CONFIG_PAX_SEGMEXEC
27569+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
27570+ pax_task_size = SEGMEXEC_TASK_SIZE;
27571+#endif
27572+
27573+ pax_task_size -= PAGE_SIZE;
27574+
27575+ /* requested length too big for entire address space */
27576+ if (len > pax_task_size)
27577+ return -ENOMEM;
27578+
27579+ if (flags & MAP_FIXED)
27580+ return addr;
27581+
27582+#ifdef CONFIG_PAX_PAGEEXEC
27583+ if (!(__supported_pte_mask & _PAGE_NX) && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE))
27584+ goto bottomup;
27585+#endif
27586+
27587+#ifdef CONFIG_PAX_RANDMMAP
27588+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
27589+#endif
27590+
27591+ /* requesting a specific address */
27592+ if (addr) {
27593+ addr = PAGE_ALIGN(addr);
27594+ if (pax_task_size - len >= addr) {
27595+ vma = find_vma(mm, addr);
27596+ if (check_heap_stack_gap(vma, addr, len, offset))
27597+ return addr;
27598+ }
27599+ }
27600+
27601+ info.flags = VM_UNMAPPED_AREA_TOPDOWN;
27602+ info.length = len;
27603+ info.low_limit = PAGE_SIZE;
27604+ info.high_limit = mm->mmap_base;
27605+ info.align_mask = filp ? get_align_mask() : 0;
27606+ info.align_offset = pgoff << PAGE_SHIFT;
27607+ info.threadstack_offset = offset;
27608+
27609+ addr = vm_unmapped_area(&info);
27610+ if (!(addr & ~PAGE_MASK))
27611+ return addr;
27612+ VM_BUG_ON(addr != -ENOMEM);
27613+
27614+bottomup:
27615+ /*
27616+ * A failed mmap() very likely causes application failure,
27617+ * so fall back to the bottom-up function here. This scenario
27618+ * can happen with large stack limits and large mmap()
27619+ * allocations.
27620+ */
27621+ return arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
27622+}
27623diff --git a/arch/x86/kernel/sys_x86_64.c b/arch/x86/kernel/sys_x86_64.c
27624index 30277e2..5664a29 100644
27625--- a/arch/x86/kernel/sys_x86_64.c
27626+++ b/arch/x86/kernel/sys_x86_64.c
27627@@ -81,8 +81,8 @@ out:
27628 return error;
27629 }
27630
27631-static void find_start_end(unsigned long flags, unsigned long *begin,
27632- unsigned long *end)
27633+static void find_start_end(struct mm_struct *mm, unsigned long flags,
27634+ unsigned long *begin, unsigned long *end)
27635 {
27636 if (!test_thread_flag(TIF_ADDR32) && (flags & MAP_32BIT)) {
27637 unsigned long new_begin;
27638@@ -101,7 +101,7 @@ static void find_start_end(unsigned long flags, unsigned long *begin,
27639 *begin = new_begin;
27640 }
27641 } else {
27642- *begin = current->mm->mmap_legacy_base;
27643+ *begin = mm->mmap_legacy_base;
27644 *end = TASK_SIZE;
27645 }
27646 }
27647@@ -114,20 +114,24 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
27648 struct vm_area_struct *vma;
27649 struct vm_unmapped_area_info info;
27650 unsigned long begin, end;
27651+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
27652
27653 if (flags & MAP_FIXED)
27654 return addr;
27655
27656- find_start_end(flags, &begin, &end);
27657+ find_start_end(mm, flags, &begin, &end);
27658
27659 if (len > end)
27660 return -ENOMEM;
27661
27662+#ifdef CONFIG_PAX_RANDMMAP
27663+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
27664+#endif
27665+
27666 if (addr) {
27667 addr = PAGE_ALIGN(addr);
27668 vma = find_vma(mm, addr);
27669- if (end - len >= addr &&
27670- (!vma || addr + len <= vma->vm_start))
27671+ if (end - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
27672 return addr;
27673 }
27674
27675@@ -137,6 +141,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
27676 info.high_limit = end;
27677 info.align_mask = filp ? get_align_mask() : 0;
27678 info.align_offset = pgoff << PAGE_SHIFT;
27679+ info.threadstack_offset = offset;
27680 return vm_unmapped_area(&info);
27681 }
27682
27683@@ -149,6 +154,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
27684 struct mm_struct *mm = current->mm;
27685 unsigned long addr = addr0;
27686 struct vm_unmapped_area_info info;
27687+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
27688
27689 /* requested length too big for entire address space */
27690 if (len > TASK_SIZE)
27691@@ -161,12 +167,15 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
27692 if (!test_thread_flag(TIF_ADDR32) && (flags & MAP_32BIT))
27693 goto bottomup;
27694
27695+#ifdef CONFIG_PAX_RANDMMAP
27696+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
27697+#endif
27698+
27699 /* requesting a specific address */
27700 if (addr) {
27701 addr = PAGE_ALIGN(addr);
27702 vma = find_vma(mm, addr);
27703- if (TASK_SIZE - len >= addr &&
27704- (!vma || addr + len <= vma->vm_start))
27705+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
27706 return addr;
27707 }
27708
27709@@ -176,6 +185,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
27710 info.high_limit = mm->mmap_base;
27711 info.align_mask = filp ? get_align_mask() : 0;
27712 info.align_offset = pgoff << PAGE_SHIFT;
27713+ info.threadstack_offset = offset;
27714 addr = vm_unmapped_area(&info);
27715 if (!(addr & ~PAGE_MASK))
27716 return addr;
27717diff --git a/arch/x86/kernel/tboot.c b/arch/x86/kernel/tboot.c
27718index 91a4496..42fc304 100644
27719--- a/arch/x86/kernel/tboot.c
27720+++ b/arch/x86/kernel/tboot.c
27721@@ -44,6 +44,7 @@
27722 #include <asm/setup.h>
27723 #include <asm/e820.h>
27724 #include <asm/io.h>
27725+#include <asm/tlbflush.h>
27726
27727 #include "../realmode/rm/wakeup.h"
27728
27729@@ -221,7 +222,7 @@ static int tboot_setup_sleep(void)
27730
27731 void tboot_shutdown(u32 shutdown_type)
27732 {
27733- void (*shutdown)(void);
27734+ void (* __noreturn shutdown)(void);
27735
27736 if (!tboot_enabled())
27737 return;
27738@@ -242,8 +243,9 @@ void tboot_shutdown(u32 shutdown_type)
27739 tboot->shutdown_type = shutdown_type;
27740
27741 switch_to_tboot_pt();
27742+ cr4_clear_bits(X86_CR4_PCIDE);
27743
27744- shutdown = (void(*)(void))(unsigned long)tboot->shutdown_entry;
27745+ shutdown = (void *)(unsigned long)tboot->shutdown_entry;
27746 shutdown();
27747
27748 /* should not reach here */
27749@@ -310,7 +312,7 @@ static int tboot_extended_sleep(u8 sleep_state, u32 val_a, u32 val_b)
27750 return -ENODEV;
27751 }
27752
27753-static atomic_t ap_wfs_count;
27754+static atomic_unchecked_t ap_wfs_count;
27755
27756 static int tboot_wait_for_aps(int num_aps)
27757 {
27758@@ -334,9 +336,9 @@ static int tboot_cpu_callback(struct notifier_block *nfb, unsigned long action,
27759 {
27760 switch (action) {
27761 case CPU_DYING:
27762- atomic_inc(&ap_wfs_count);
27763+ atomic_inc_unchecked(&ap_wfs_count);
27764 if (num_online_cpus() == 1)
27765- if (tboot_wait_for_aps(atomic_read(&ap_wfs_count)))
27766+ if (tboot_wait_for_aps(atomic_read_unchecked(&ap_wfs_count)))
27767 return NOTIFY_BAD;
27768 break;
27769 }
27770@@ -422,7 +424,7 @@ static __init int tboot_late_init(void)
27771
27772 tboot_create_trampoline();
27773
27774- atomic_set(&ap_wfs_count, 0);
27775+ atomic_set_unchecked(&ap_wfs_count, 0);
27776 register_hotcpu_notifier(&tboot_cpu_notifier);
27777
27778 #ifdef CONFIG_DEBUG_FS
27779diff --git a/arch/x86/kernel/time.c b/arch/x86/kernel/time.c
27780index 25adc0e..1df4349 100644
27781--- a/arch/x86/kernel/time.c
27782+++ b/arch/x86/kernel/time.c
27783@@ -30,9 +30,9 @@ unsigned long profile_pc(struct pt_regs *regs)
27784 {
27785 unsigned long pc = instruction_pointer(regs);
27786
27787- if (!user_mode_vm(regs) && in_lock_functions(pc)) {
27788+ if (!user_mode(regs) && in_lock_functions(pc)) {
27789 #ifdef CONFIG_FRAME_POINTER
27790- return *(unsigned long *)(regs->bp + sizeof(long));
27791+ return ktla_ktva(*(unsigned long *)(regs->bp + sizeof(long)));
27792 #else
27793 unsigned long *sp =
27794 (unsigned long *)kernel_stack_pointer(regs);
27795@@ -41,11 +41,17 @@ unsigned long profile_pc(struct pt_regs *regs)
27796 * or above a saved flags. Eflags has bits 22-31 zero,
27797 * kernel addresses don't.
27798 */
27799+
27800+#ifdef CONFIG_PAX_KERNEXEC
27801+ return ktla_ktva(sp[0]);
27802+#else
27803 if (sp[0] >> 22)
27804 return sp[0];
27805 if (sp[1] >> 22)
27806 return sp[1];
27807 #endif
27808+
27809+#endif
27810 }
27811 return pc;
27812 }
27813diff --git a/arch/x86/kernel/tls.c b/arch/x86/kernel/tls.c
27814index 7fc5e84..c6e445a 100644
27815--- a/arch/x86/kernel/tls.c
27816+++ b/arch/x86/kernel/tls.c
27817@@ -139,6 +139,11 @@ int do_set_thread_area(struct task_struct *p, int idx,
27818 if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
27819 return -EINVAL;
27820
27821+#ifdef CONFIG_PAX_SEGMEXEC
27822+ if ((p->mm->pax_flags & MF_PAX_SEGMEXEC) && (info.contents & MODIFY_LDT_CONTENTS_CODE))
27823+ return -EINVAL;
27824+#endif
27825+
27826 set_tls_desc(p, idx, &info, 1);
27827
27828 return 0;
27829@@ -256,7 +261,7 @@ int regset_tls_set(struct task_struct *target, const struct user_regset *regset,
27830
27831 if (kbuf)
27832 info = kbuf;
27833- else if (__copy_from_user(infobuf, ubuf, count))
27834+ else if (count > sizeof infobuf || __copy_from_user(infobuf, ubuf, count))
27835 return -EFAULT;
27836 else
27837 info = infobuf;
27838diff --git a/arch/x86/kernel/tracepoint.c b/arch/x86/kernel/tracepoint.c
27839index 1c113db..287b42e 100644
27840--- a/arch/x86/kernel/tracepoint.c
27841+++ b/arch/x86/kernel/tracepoint.c
27842@@ -9,11 +9,11 @@
27843 #include <linux/atomic.h>
27844
27845 atomic_t trace_idt_ctr = ATOMIC_INIT(0);
27846-struct desc_ptr trace_idt_descr = { NR_VECTORS * 16 - 1,
27847+const struct desc_ptr trace_idt_descr = { NR_VECTORS * 16 - 1,
27848 (unsigned long) trace_idt_table };
27849
27850 /* No need to be aligned, but done to keep all IDTs defined the same way. */
27851-gate_desc trace_idt_table[NR_VECTORS] __page_aligned_bss;
27852+gate_desc trace_idt_table[NR_VECTORS] __page_aligned_rodata;
27853
27854 static int trace_irq_vector_refcount;
27855 static DEFINE_MUTEX(irq_vector_mutex);
27856diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
27857index 4ff5d16..736e3e1 100644
27858--- a/arch/x86/kernel/traps.c
27859+++ b/arch/x86/kernel/traps.c
27860@@ -68,7 +68,7 @@
27861 #include <asm/proto.h>
27862
27863 /* No need to be aligned, but done to keep all IDTs defined the same way. */
27864-gate_desc debug_idt_table[NR_VECTORS] __page_aligned_bss;
27865+gate_desc debug_idt_table[NR_VECTORS] __page_aligned_rodata;
27866 #else
27867 #include <asm/processor-flags.h>
27868 #include <asm/setup.h>
27869@@ -77,7 +77,7 @@ asmlinkage int system_call(void);
27870 #endif
27871
27872 /* Must be page-aligned because the real IDT is used in a fixmap. */
27873-gate_desc idt_table[NR_VECTORS] __page_aligned_bss;
27874+gate_desc idt_table[NR_VECTORS] __page_aligned_rodata;
27875
27876 DECLARE_BITMAP(used_vectors, NR_VECTORS);
27877 EXPORT_SYMBOL_GPL(used_vectors);
27878@@ -112,7 +112,7 @@ enum ctx_state ist_enter(struct pt_regs *regs)
27879 {
27880 enum ctx_state prev_state;
27881
27882- if (user_mode_vm(regs)) {
27883+ if (user_mode(regs)) {
27884 /* Other than that, we're just an exception. */
27885 prev_state = exception_enter();
27886 } else {
27887@@ -146,7 +146,7 @@ void ist_exit(struct pt_regs *regs, enum ctx_state prev_state)
27888 /* Must be before exception_exit. */
27889 preempt_count_sub(HARDIRQ_OFFSET);
27890
27891- if (user_mode_vm(regs))
27892+ if (user_mode(regs))
27893 return exception_exit(prev_state);
27894 else
27895 rcu_nmi_exit();
27896@@ -158,7 +158,7 @@ void ist_exit(struct pt_regs *regs, enum ctx_state prev_state)
27897 *
27898 * IST exception handlers normally cannot schedule. As a special
27899 * exception, if the exception interrupted userspace code (i.e.
27900- * user_mode_vm(regs) would return true) and the exception was not
27901+ * user_mode(regs) would return true) and the exception was not
27902 * a double fault, it can be safe to schedule. ist_begin_non_atomic()
27903 * begins a non-atomic section within an ist_enter()/ist_exit() region.
27904 * Callers are responsible for enabling interrupts themselves inside
27905@@ -167,7 +167,7 @@ void ist_exit(struct pt_regs *regs, enum ctx_state prev_state)
27906 */
27907 void ist_begin_non_atomic(struct pt_regs *regs)
27908 {
27909- BUG_ON(!user_mode_vm(regs));
27910+ BUG_ON(!user_mode(regs));
27911
27912 /*
27913 * Sanity check: we need to be on the normal thread stack. This
27914@@ -191,11 +191,11 @@ void ist_end_non_atomic(void)
27915 }
27916
27917 static nokprobe_inline int
27918-do_trap_no_signal(struct task_struct *tsk, int trapnr, char *str,
27919+do_trap_no_signal(struct task_struct *tsk, int trapnr, const char *str,
27920 struct pt_regs *regs, long error_code)
27921 {
27922 #ifdef CONFIG_X86_32
27923- if (regs->flags & X86_VM_MASK) {
27924+ if (v8086_mode(regs)) {
27925 /*
27926 * Traps 0, 1, 3, 4, and 5 should be forwarded to vm86.
27927 * On nmi (interrupt 2), do_trap should not be called.
27928@@ -208,12 +208,24 @@ do_trap_no_signal(struct task_struct *tsk, int trapnr, char *str,
27929 return -1;
27930 }
27931 #endif
27932- if (!user_mode(regs)) {
27933+ if (!user_mode_novm(regs)) {
27934 if (!fixup_exception(regs)) {
27935 tsk->thread.error_code = error_code;
27936 tsk->thread.trap_nr = trapnr;
27937+
27938+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
27939+ if (trapnr == X86_TRAP_SS && ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS))
27940+ str = "PAX: suspicious stack segment fault";
27941+#endif
27942+
27943 die(str, regs, error_code);
27944 }
27945+
27946+#ifdef CONFIG_PAX_REFCOUNT
27947+ if (trapnr == X86_TRAP_OF)
27948+ pax_report_refcount_overflow(regs);
27949+#endif
27950+
27951 return 0;
27952 }
27953
27954@@ -252,7 +264,7 @@ static siginfo_t *fill_trap_info(struct pt_regs *regs, int signr, int trapnr,
27955 }
27956
27957 static void
27958-do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
27959+do_trap(int trapnr, int signr, const char *str, struct pt_regs *regs,
27960 long error_code, siginfo_t *info)
27961 {
27962 struct task_struct *tsk = current;
27963@@ -276,7 +288,7 @@ do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
27964 if (show_unhandled_signals && unhandled_signal(tsk, signr) &&
27965 printk_ratelimit()) {
27966 pr_info("%s[%d] trap %s ip:%lx sp:%lx error:%lx",
27967- tsk->comm, tsk->pid, str,
27968+ tsk->comm, task_pid_nr(tsk), str,
27969 regs->ip, regs->sp, error_code);
27970 print_vma_addr(" in ", regs->ip);
27971 pr_cont("\n");
27972@@ -358,6 +370,11 @@ dotraplinkage void do_double_fault(struct pt_regs *regs, long error_code)
27973 tsk->thread.error_code = error_code;
27974 tsk->thread.trap_nr = X86_TRAP_DF;
27975
27976+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
27977+ if ((unsigned long)tsk->stack - regs->sp <= PAGE_SIZE)
27978+ die("grsec: kernel stack overflow detected", regs, error_code);
27979+#endif
27980+
27981 #ifdef CONFIG_DOUBLEFAULT
27982 df_debug(regs, error_code);
27983 #endif
27984@@ -384,7 +401,7 @@ dotraplinkage void do_bounds(struct pt_regs *regs, long error_code)
27985 goto exit;
27986 conditional_sti(regs);
27987
27988- if (!user_mode_vm(regs))
27989+ if (!user_mode(regs))
27990 die("bounds", regs, error_code);
27991
27992 if (!cpu_feature_enabled(X86_FEATURE_MPX)) {
27993@@ -463,7 +480,7 @@ do_general_protection(struct pt_regs *regs, long error_code)
27994 conditional_sti(regs);
27995
27996 #ifdef CONFIG_X86_32
27997- if (regs->flags & X86_VM_MASK) {
27998+ if (v8086_mode(regs)) {
27999 local_irq_enable();
28000 handle_vm86_fault((struct kernel_vm86_regs *) regs, error_code);
28001 goto exit;
28002@@ -471,18 +488,42 @@ do_general_protection(struct pt_regs *regs, long error_code)
28003 #endif
28004
28005 tsk = current;
28006- if (!user_mode(regs)) {
28007+ if (!user_mode_novm(regs)) {
28008 if (fixup_exception(regs))
28009 goto exit;
28010
28011 tsk->thread.error_code = error_code;
28012 tsk->thread.trap_nr = X86_TRAP_GP;
28013 if (notify_die(DIE_GPF, "general protection fault", regs, error_code,
28014- X86_TRAP_GP, SIGSEGV) != NOTIFY_STOP)
28015+ X86_TRAP_GP, SIGSEGV) != NOTIFY_STOP) {
28016+
28017+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
28018+ if ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS)
28019+ die("PAX: suspicious general protection fault", regs, error_code);
28020+ else
28021+#endif
28022+
28023 die("general protection fault", regs, error_code);
28024+ }
28025 goto exit;
28026 }
28027
28028+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
28029+ if (!(__supported_pte_mask & _PAGE_NX) && tsk->mm && (tsk->mm->pax_flags & MF_PAX_PAGEEXEC)) {
28030+ struct mm_struct *mm = tsk->mm;
28031+ unsigned long limit;
28032+
28033+ down_write(&mm->mmap_sem);
28034+ limit = mm->context.user_cs_limit;
28035+ if (limit < TASK_SIZE) {
28036+ track_exec_limit(mm, limit, TASK_SIZE, VM_EXEC);
28037+ up_write(&mm->mmap_sem);
28038+ return;
28039+ }
28040+ up_write(&mm->mmap_sem);
28041+ }
28042+#endif
28043+
28044 tsk->thread.error_code = error_code;
28045 tsk->thread.trap_nr = X86_TRAP_GP;
28046
28047@@ -581,13 +622,16 @@ struct bad_iret_stack *fixup_bad_iret(struct bad_iret_stack *s)
28048 container_of(task_pt_regs(current),
28049 struct bad_iret_stack, regs);
28050
28051+ if ((current->thread.sp0 ^ (unsigned long)s) < THREAD_SIZE)
28052+ new_stack = s;
28053+
28054 /* Copy the IRET target to the new stack. */
28055 memmove(&new_stack->regs.ip, (void *)s->regs.sp, 5*8);
28056
28057 /* Copy the remainder of the stack from the current stack. */
28058 memmove(new_stack, s, offsetof(struct bad_iret_stack, regs.ip));
28059
28060- BUG_ON(!user_mode_vm(&new_stack->regs));
28061+ BUG_ON(!user_mode(&new_stack->regs));
28062 return new_stack;
28063 }
28064 NOKPROBE_SYMBOL(fixup_bad_iret);
28065@@ -637,7 +681,7 @@ dotraplinkage void do_debug(struct pt_regs *regs, long error_code)
28066 * then it's very likely the result of an icebp/int01 trap.
28067 * User wants a sigtrap for that.
28068 */
28069- if (!dr6 && user_mode_vm(regs))
28070+ if (!dr6 && user_mode(regs))
28071 user_icebp = 1;
28072
28073 /* Catch kmemcheck conditions first of all! */
28074@@ -673,7 +717,7 @@ dotraplinkage void do_debug(struct pt_regs *regs, long error_code)
28075 /* It's safe to allow irq's after DR6 has been saved */
28076 preempt_conditional_sti(regs);
28077
28078- if (regs->flags & X86_VM_MASK) {
28079+ if (v8086_mode(regs)) {
28080 handle_vm86_trap((struct kernel_vm86_regs *) regs, error_code,
28081 X86_TRAP_DB);
28082 preempt_conditional_cli(regs);
28083@@ -688,7 +732,7 @@ dotraplinkage void do_debug(struct pt_regs *regs, long error_code)
28084 * We already checked v86 mode above, so we can check for kernel mode
28085 * by just checking the CPL of CS.
28086 */
28087- if ((dr6 & DR_STEP) && !user_mode(regs)) {
28088+ if ((dr6 & DR_STEP) && !user_mode_novm(regs)) {
28089 tsk->thread.debugreg6 &= ~DR_STEP;
28090 set_tsk_thread_flag(tsk, TIF_SINGLESTEP);
28091 regs->flags &= ~X86_EFLAGS_TF;
28092@@ -721,7 +765,7 @@ static void math_error(struct pt_regs *regs, int error_code, int trapnr)
28093 return;
28094 conditional_sti(regs);
28095
28096- if (!user_mode_vm(regs))
28097+ if (!user_mode(regs))
28098 {
28099 if (!fixup_exception(regs)) {
28100 task->thread.error_code = error_code;
28101diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
28102index 5054497..139f8f8 100644
28103--- a/arch/x86/kernel/tsc.c
28104+++ b/arch/x86/kernel/tsc.c
28105@@ -150,7 +150,7 @@ static void cyc2ns_write_end(int cpu, struct cyc2ns_data *data)
28106 */
28107 smp_wmb();
28108
28109- ACCESS_ONCE(c2n->head) = data;
28110+ ACCESS_ONCE_RW(c2n->head) = data;
28111 }
28112
28113 /*
28114diff --git a/arch/x86/kernel/uprobes.c b/arch/x86/kernel/uprobes.c
28115index 81f8adb0..fff670e 100644
28116--- a/arch/x86/kernel/uprobes.c
28117+++ b/arch/x86/kernel/uprobes.c
28118@@ -912,7 +912,7 @@ int arch_uprobe_exception_notify(struct notifier_block *self, unsigned long val,
28119 int ret = NOTIFY_DONE;
28120
28121 /* We are only interested in userspace traps */
28122- if (regs && !user_mode_vm(regs))
28123+ if (regs && !user_mode(regs))
28124 return NOTIFY_DONE;
28125
28126 switch (val) {
28127@@ -986,7 +986,7 @@ arch_uretprobe_hijack_return_addr(unsigned long trampoline_vaddr, struct pt_regs
28128
28129 if (nleft != rasize) {
28130 pr_err("uprobe: return address clobbered: pid=%d, %%sp=%#lx, "
28131- "%%ip=%#lx\n", current->pid, regs->sp, regs->ip);
28132+ "%%ip=%#lx\n", task_pid_nr(current), regs->sp, regs->ip);
28133
28134 force_sig_info(SIGSEGV, SEND_SIG_FORCED, current);
28135 }
28136diff --git a/arch/x86/kernel/verify_cpu.S b/arch/x86/kernel/verify_cpu.S
28137index b9242ba..50c5edd 100644
28138--- a/arch/x86/kernel/verify_cpu.S
28139+++ b/arch/x86/kernel/verify_cpu.S
28140@@ -20,6 +20,7 @@
28141 * arch/x86/boot/compressed/head_64.S: Boot cpu verification
28142 * arch/x86/kernel/trampoline_64.S: secondary processor verification
28143 * arch/x86/kernel/head_32.S: processor startup
28144+ * arch/x86/kernel/acpi/realmode/wakeup.S: 32bit processor resume
28145 *
28146 * verify_cpu, returns the status of longmode and SSE in register %eax.
28147 * 0: Success 1: Failure
28148diff --git a/arch/x86/kernel/vm86_32.c b/arch/x86/kernel/vm86_32.c
28149index e8edcf5..27f9344 100644
28150--- a/arch/x86/kernel/vm86_32.c
28151+++ b/arch/x86/kernel/vm86_32.c
28152@@ -44,6 +44,7 @@
28153 #include <linux/ptrace.h>
28154 #include <linux/audit.h>
28155 #include <linux/stddef.h>
28156+#include <linux/grsecurity.h>
28157
28158 #include <asm/uaccess.h>
28159 #include <asm/io.h>
28160@@ -150,7 +151,7 @@ struct pt_regs *save_v86_state(struct kernel_vm86_regs *regs)
28161 do_exit(SIGSEGV);
28162 }
28163
28164- tss = &per_cpu(init_tss, get_cpu());
28165+ tss = init_tss + get_cpu();
28166 current->thread.sp0 = current->thread.saved_sp0;
28167 current->thread.sysenter_cs = __KERNEL_CS;
28168 load_sp0(tss, &current->thread);
28169@@ -214,6 +215,14 @@ SYSCALL_DEFINE1(vm86old, struct vm86_struct __user *, v86)
28170
28171 if (tsk->thread.saved_sp0)
28172 return -EPERM;
28173+
28174+#ifdef CONFIG_GRKERNSEC_VM86
28175+ if (!capable(CAP_SYS_RAWIO)) {
28176+ gr_handle_vm86();
28177+ return -EPERM;
28178+ }
28179+#endif
28180+
28181 tmp = copy_vm86_regs_from_user(&info.regs, &v86->regs,
28182 offsetof(struct kernel_vm86_struct, vm86plus) -
28183 sizeof(info.regs));
28184@@ -238,6 +247,13 @@ SYSCALL_DEFINE2(vm86, unsigned long, cmd, unsigned long, arg)
28185 int tmp;
28186 struct vm86plus_struct __user *v86;
28187
28188+#ifdef CONFIG_GRKERNSEC_VM86
28189+ if (!capable(CAP_SYS_RAWIO)) {
28190+ gr_handle_vm86();
28191+ return -EPERM;
28192+ }
28193+#endif
28194+
28195 tsk = current;
28196 switch (cmd) {
28197 case VM86_REQUEST_IRQ:
28198@@ -318,7 +334,7 @@ static void do_sys_vm86(struct kernel_vm86_struct *info, struct task_struct *tsk
28199 tsk->thread.saved_fs = info->regs32->fs;
28200 tsk->thread.saved_gs = get_user_gs(info->regs32);
28201
28202- tss = &per_cpu(init_tss, get_cpu());
28203+ tss = init_tss + get_cpu();
28204 tsk->thread.sp0 = (unsigned long) &info->VM86_TSS_ESP0;
28205 if (cpu_has_sep)
28206 tsk->thread.sysenter_cs = 0;
28207@@ -525,7 +541,7 @@ static void do_int(struct kernel_vm86_regs *regs, int i,
28208 goto cannot_handle;
28209 if (i == 0x21 && is_revectored(AH(regs), &KVM86->int21_revectored))
28210 goto cannot_handle;
28211- intr_ptr = (unsigned long __user *) (i << 2);
28212+ intr_ptr = (__force unsigned long __user *) (i << 2);
28213 if (get_user(segoffs, intr_ptr))
28214 goto cannot_handle;
28215 if ((segoffs >> 16) == BIOSSEG)
28216diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S
28217index 00bf300..129df8e 100644
28218--- a/arch/x86/kernel/vmlinux.lds.S
28219+++ b/arch/x86/kernel/vmlinux.lds.S
28220@@ -26,6 +26,13 @@
28221 #include <asm/page_types.h>
28222 #include <asm/cache.h>
28223 #include <asm/boot.h>
28224+#include <asm/segment.h>
28225+
28226+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
28227+#define __KERNEL_TEXT_OFFSET (LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR)
28228+#else
28229+#define __KERNEL_TEXT_OFFSET 0
28230+#endif
28231
28232 #undef i386 /* in case the preprocessor is a 32bit one */
28233
28234@@ -69,30 +76,43 @@ jiffies_64 = jiffies;
28235
28236 PHDRS {
28237 text PT_LOAD FLAGS(5); /* R_E */
28238+#ifdef CONFIG_X86_32
28239+ module PT_LOAD FLAGS(5); /* R_E */
28240+#endif
28241+#ifdef CONFIG_XEN
28242+ rodata PT_LOAD FLAGS(5); /* R_E */
28243+#else
28244+ rodata PT_LOAD FLAGS(4); /* R__ */
28245+#endif
28246 data PT_LOAD FLAGS(6); /* RW_ */
28247-#ifdef CONFIG_X86_64
28248+ init.begin PT_LOAD FLAGS(6); /* RW_ */
28249 #ifdef CONFIG_SMP
28250 percpu PT_LOAD FLAGS(6); /* RW_ */
28251 #endif
28252+ text.init PT_LOAD FLAGS(5); /* R_E */
28253+ text.exit PT_LOAD FLAGS(5); /* R_E */
28254 init PT_LOAD FLAGS(7); /* RWE */
28255-#endif
28256 note PT_NOTE FLAGS(0); /* ___ */
28257 }
28258
28259 SECTIONS
28260 {
28261 #ifdef CONFIG_X86_32
28262- . = LOAD_OFFSET + LOAD_PHYSICAL_ADDR;
28263- phys_startup_32 = startup_32 - LOAD_OFFSET;
28264+ . = LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR;
28265 #else
28266- . = __START_KERNEL;
28267- phys_startup_64 = startup_64 - LOAD_OFFSET;
28268+ . = __START_KERNEL;
28269 #endif
28270
28271 /* Text and read-only data */
28272- .text : AT(ADDR(.text) - LOAD_OFFSET) {
28273- _text = .;
28274+ .text (. - __KERNEL_TEXT_OFFSET): AT(ADDR(.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
28275 /* bootstrapping code */
28276+#ifdef CONFIG_X86_32
28277+ phys_startup_32 = startup_32 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
28278+#else
28279+ phys_startup_64 = startup_64 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
28280+#endif
28281+ __LOAD_PHYSICAL_ADDR = . - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
28282+ _text = .;
28283 HEAD_TEXT
28284 . = ALIGN(8);
28285 _stext = .;
28286@@ -104,13 +124,47 @@ SECTIONS
28287 IRQENTRY_TEXT
28288 *(.fixup)
28289 *(.gnu.warning)
28290- /* End of text section */
28291- _etext = .;
28292 } :text = 0x9090
28293
28294- NOTES :text :note
28295+ . += __KERNEL_TEXT_OFFSET;
28296
28297- EXCEPTION_TABLE(16) :text = 0x9090
28298+#ifdef CONFIG_X86_32
28299+ . = ALIGN(PAGE_SIZE);
28300+ .module.text : AT(ADDR(.module.text) - LOAD_OFFSET) {
28301+
28302+#ifdef CONFIG_PAX_KERNEXEC
28303+ MODULES_EXEC_VADDR = .;
28304+ BYTE(0)
28305+ . += (CONFIG_PAX_KERNEXEC_MODULE_TEXT * 1024 * 1024);
28306+ . = ALIGN(HPAGE_SIZE) - 1;
28307+ MODULES_EXEC_END = .;
28308+#endif
28309+
28310+ } :module
28311+#endif
28312+
28313+ .text.end : AT(ADDR(.text.end) - LOAD_OFFSET) {
28314+ /* End of text section */
28315+ BYTE(0)
28316+ _etext = . - __KERNEL_TEXT_OFFSET;
28317+ }
28318+
28319+#ifdef CONFIG_X86_32
28320+ . = ALIGN(PAGE_SIZE);
28321+ .rodata.page_aligned : AT(ADDR(.rodata.page_aligned) - LOAD_OFFSET) {
28322+ . = ALIGN(PAGE_SIZE);
28323+ *(.empty_zero_page)
28324+ *(.initial_pg_fixmap)
28325+ *(.initial_pg_pmd)
28326+ *(.initial_page_table)
28327+ *(.swapper_pg_dir)
28328+ } :rodata
28329+#endif
28330+
28331+ . = ALIGN(PAGE_SIZE);
28332+ NOTES :rodata :note
28333+
28334+ EXCEPTION_TABLE(16) :rodata
28335
28336 #if defined(CONFIG_DEBUG_RODATA)
28337 /* .text should occupy whole number of pages */
28338@@ -122,16 +176,20 @@ SECTIONS
28339
28340 /* Data */
28341 .data : AT(ADDR(.data) - LOAD_OFFSET) {
28342+
28343+#ifdef CONFIG_PAX_KERNEXEC
28344+ . = ALIGN(HPAGE_SIZE);
28345+#else
28346+ . = ALIGN(PAGE_SIZE);
28347+#endif
28348+
28349 /* Start of data section */
28350 _sdata = .;
28351
28352 /* init_task */
28353 INIT_TASK_DATA(THREAD_SIZE)
28354
28355-#ifdef CONFIG_X86_32
28356- /* 32 bit has nosave before _edata */
28357 NOSAVE_DATA
28358-#endif
28359
28360 PAGE_ALIGNED_DATA(PAGE_SIZE)
28361
28362@@ -174,12 +232,19 @@ SECTIONS
28363 . = ALIGN(__vvar_page + PAGE_SIZE, PAGE_SIZE);
28364
28365 /* Init code and data - will be freed after init */
28366- . = ALIGN(PAGE_SIZE);
28367 .init.begin : AT(ADDR(.init.begin) - LOAD_OFFSET) {
28368+ BYTE(0)
28369+
28370+#ifdef CONFIG_PAX_KERNEXEC
28371+ . = ALIGN(HPAGE_SIZE);
28372+#else
28373+ . = ALIGN(PAGE_SIZE);
28374+#endif
28375+
28376 __init_begin = .; /* paired with __init_end */
28377- }
28378+ } :init.begin
28379
28380-#if defined(CONFIG_X86_64) && defined(CONFIG_SMP)
28381+#ifdef CONFIG_SMP
28382 /*
28383 * percpu offsets are zero-based on SMP. PERCPU_VADDR() changes the
28384 * output PHDR, so the next output section - .init.text - should
28385@@ -190,12 +255,27 @@ SECTIONS
28386 "per-CPU data too large - increase CONFIG_PHYSICAL_START")
28387 #endif
28388
28389- INIT_TEXT_SECTION(PAGE_SIZE)
28390-#ifdef CONFIG_X86_64
28391- :init
28392-#endif
28393+ . = ALIGN(PAGE_SIZE);
28394+ init_begin = .;
28395+ .init.text (. - __KERNEL_TEXT_OFFSET): AT(init_begin - LOAD_OFFSET) {
28396+ VMLINUX_SYMBOL(_sinittext) = .;
28397+ INIT_TEXT
28398+ . = ALIGN(PAGE_SIZE);
28399+ } :text.init
28400
28401- INIT_DATA_SECTION(16)
28402+ /*
28403+ * .exit.text is discard at runtime, not link time, to deal with
28404+ * references from .altinstructions and .eh_frame
28405+ */
28406+ .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
28407+ EXIT_TEXT
28408+ VMLINUX_SYMBOL(_einittext) = .;
28409+ . = ALIGN(16);
28410+ } :text.exit
28411+ . = init_begin + SIZEOF(.init.text) + SIZEOF(.exit.text);
28412+
28413+ . = ALIGN(PAGE_SIZE);
28414+ INIT_DATA_SECTION(16) :init
28415
28416 .x86_cpu_dev.init : AT(ADDR(.x86_cpu_dev.init) - LOAD_OFFSET) {
28417 __x86_cpu_dev_start = .;
28418@@ -266,19 +346,12 @@ SECTIONS
28419 }
28420
28421 . = ALIGN(8);
28422- /*
28423- * .exit.text is discard at runtime, not link time, to deal with
28424- * references from .altinstructions and .eh_frame
28425- */
28426- .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET) {
28427- EXIT_TEXT
28428- }
28429
28430 .exit.data : AT(ADDR(.exit.data) - LOAD_OFFSET) {
28431 EXIT_DATA
28432 }
28433
28434-#if !defined(CONFIG_X86_64) || !defined(CONFIG_SMP)
28435+#ifndef CONFIG_SMP
28436 PERCPU_SECTION(INTERNODE_CACHE_BYTES)
28437 #endif
28438
28439@@ -297,16 +370,10 @@ SECTIONS
28440 .smp_locks : AT(ADDR(.smp_locks) - LOAD_OFFSET) {
28441 __smp_locks = .;
28442 *(.smp_locks)
28443- . = ALIGN(PAGE_SIZE);
28444 __smp_locks_end = .;
28445+ . = ALIGN(PAGE_SIZE);
28446 }
28447
28448-#ifdef CONFIG_X86_64
28449- .data_nosave : AT(ADDR(.data_nosave) - LOAD_OFFSET) {
28450- NOSAVE_DATA
28451- }
28452-#endif
28453-
28454 /* BSS */
28455 . = ALIGN(PAGE_SIZE);
28456 .bss : AT(ADDR(.bss) - LOAD_OFFSET) {
28457@@ -322,6 +389,7 @@ SECTIONS
28458 __brk_base = .;
28459 . += 64 * 1024; /* 64k alignment slop space */
28460 *(.brk_reservation) /* areas brk users have reserved */
28461+ . = ALIGN(HPAGE_SIZE);
28462 __brk_limit = .;
28463 }
28464
28465@@ -348,13 +416,12 @@ SECTIONS
28466 * for the boot processor.
28467 */
28468 #define INIT_PER_CPU(x) init_per_cpu__##x = x + __per_cpu_load
28469-INIT_PER_CPU(gdt_page);
28470 INIT_PER_CPU(irq_stack_union);
28471
28472 /*
28473 * Build-time check on the image size:
28474 */
28475-. = ASSERT((_end - _text <= KERNEL_IMAGE_SIZE),
28476+. = ASSERT((_end - _text - __KERNEL_TEXT_OFFSET <= KERNEL_IMAGE_SIZE),
28477 "kernel image bigger than KERNEL_IMAGE_SIZE");
28478
28479 #ifdef CONFIG_SMP
28480diff --git a/arch/x86/kernel/vsyscall_64.c b/arch/x86/kernel/vsyscall_64.c
28481index 2dcc6ff..082dc7a 100644
28482--- a/arch/x86/kernel/vsyscall_64.c
28483+++ b/arch/x86/kernel/vsyscall_64.c
28484@@ -38,15 +38,13 @@
28485 #define CREATE_TRACE_POINTS
28486 #include "vsyscall_trace.h"
28487
28488-static enum { EMULATE, NATIVE, NONE } vsyscall_mode = EMULATE;
28489+static enum { EMULATE, NONE } vsyscall_mode = EMULATE;
28490
28491 static int __init vsyscall_setup(char *str)
28492 {
28493 if (str) {
28494 if (!strcmp("emulate", str))
28495 vsyscall_mode = EMULATE;
28496- else if (!strcmp("native", str))
28497- vsyscall_mode = NATIVE;
28498 else if (!strcmp("none", str))
28499 vsyscall_mode = NONE;
28500 else
28501@@ -264,8 +262,7 @@ do_ret:
28502 return true;
28503
28504 sigsegv:
28505- force_sig(SIGSEGV, current);
28506- return true;
28507+ do_group_exit(SIGKILL);
28508 }
28509
28510 /*
28511@@ -283,8 +280,8 @@ static struct vm_operations_struct gate_vma_ops = {
28512 static struct vm_area_struct gate_vma = {
28513 .vm_start = VSYSCALL_ADDR,
28514 .vm_end = VSYSCALL_ADDR + PAGE_SIZE,
28515- .vm_page_prot = PAGE_READONLY_EXEC,
28516- .vm_flags = VM_READ | VM_EXEC,
28517+ .vm_page_prot = PAGE_READONLY,
28518+ .vm_flags = VM_READ,
28519 .vm_ops = &gate_vma_ops,
28520 };
28521
28522@@ -325,10 +322,7 @@ void __init map_vsyscall(void)
28523 unsigned long physaddr_vsyscall = __pa_symbol(&__vsyscall_page);
28524
28525 if (vsyscall_mode != NONE)
28526- __set_fixmap(VSYSCALL_PAGE, physaddr_vsyscall,
28527- vsyscall_mode == NATIVE
28528- ? PAGE_KERNEL_VSYSCALL
28529- : PAGE_KERNEL_VVAR);
28530+ __set_fixmap(VSYSCALL_PAGE, physaddr_vsyscall, PAGE_KERNEL_VVAR);
28531
28532 BUILD_BUG_ON((unsigned long)__fix_to_virt(VSYSCALL_PAGE) !=
28533 (unsigned long)VSYSCALL_ADDR);
28534diff --git a/arch/x86/kernel/x8664_ksyms_64.c b/arch/x86/kernel/x8664_ksyms_64.c
28535index 37d8fa4..66e319a 100644
28536--- a/arch/x86/kernel/x8664_ksyms_64.c
28537+++ b/arch/x86/kernel/x8664_ksyms_64.c
28538@@ -34,8 +34,6 @@ EXPORT_SYMBOL(copy_user_generic_string);
28539 EXPORT_SYMBOL(copy_user_generic_unrolled);
28540 EXPORT_SYMBOL(copy_user_enhanced_fast_string);
28541 EXPORT_SYMBOL(__copy_user_nocache);
28542-EXPORT_SYMBOL(_copy_from_user);
28543-EXPORT_SYMBOL(_copy_to_user);
28544
28545 EXPORT_SYMBOL(copy_page);
28546 EXPORT_SYMBOL(clear_page);
28547@@ -79,3 +77,7 @@ EXPORT_SYMBOL(___preempt_schedule);
28548 EXPORT_SYMBOL(___preempt_schedule_context);
28549 #endif
28550 #endif
28551+
28552+#ifdef CONFIG_PAX_PER_CPU_PGD
28553+EXPORT_SYMBOL(cpu_pgd);
28554+#endif
28555diff --git a/arch/x86/kernel/x86_init.c b/arch/x86/kernel/x86_init.c
28556index 234b072..b7ab191 100644
28557--- a/arch/x86/kernel/x86_init.c
28558+++ b/arch/x86/kernel/x86_init.c
28559@@ -93,7 +93,7 @@ struct x86_cpuinit_ops x86_cpuinit = {
28560 static void default_nmi_init(void) { };
28561 static int default_i8042_detect(void) { return 1; };
28562
28563-struct x86_platform_ops x86_platform = {
28564+struct x86_platform_ops x86_platform __read_only = {
28565 .calibrate_tsc = native_calibrate_tsc,
28566 .get_wallclock = mach_get_cmos_time,
28567 .set_wallclock = mach_set_rtc_mmss,
28568@@ -109,7 +109,7 @@ struct x86_platform_ops x86_platform = {
28569 EXPORT_SYMBOL_GPL(x86_platform);
28570
28571 #if defined(CONFIG_PCI_MSI)
28572-struct x86_msi_ops x86_msi = {
28573+struct x86_msi_ops x86_msi __read_only = {
28574 .setup_msi_irqs = native_setup_msi_irqs,
28575 .compose_msi_msg = native_compose_msi_msg,
28576 .teardown_msi_irq = native_teardown_msi_irq,
28577@@ -140,7 +140,7 @@ void arch_restore_msi_irqs(struct pci_dev *dev)
28578 }
28579 #endif
28580
28581-struct x86_io_apic_ops x86_io_apic_ops = {
28582+struct x86_io_apic_ops x86_io_apic_ops __read_only = {
28583 .init = native_io_apic_init_mappings,
28584 .read = native_io_apic_read,
28585 .write = native_io_apic_write,
28586diff --git a/arch/x86/kernel/xsave.c b/arch/x86/kernel/xsave.c
28587index cdc6cf9..e04f495 100644
28588--- a/arch/x86/kernel/xsave.c
28589+++ b/arch/x86/kernel/xsave.c
28590@@ -168,18 +168,18 @@ static inline int save_xstate_epilog(void __user *buf, int ia32_frame)
28591
28592 /* Setup the bytes not touched by the [f]xsave and reserved for SW. */
28593 sw_bytes = ia32_frame ? &fx_sw_reserved_ia32 : &fx_sw_reserved;
28594- err = __copy_to_user(&x->i387.sw_reserved, sw_bytes, sizeof(*sw_bytes));
28595+ err = __copy_to_user(x->i387.sw_reserved, sw_bytes, sizeof(*sw_bytes));
28596
28597 if (!use_xsave())
28598 return err;
28599
28600- err |= __put_user(FP_XSTATE_MAGIC2, (__u32 *)(buf + xstate_size));
28601+ err |= __put_user(FP_XSTATE_MAGIC2, (__u32 __user *)(buf + xstate_size));
28602
28603 /*
28604 * Read the xstate_bv which we copied (directly from the cpu or
28605 * from the state in task struct) to the user buffers.
28606 */
28607- err |= __get_user(xstate_bv, (__u32 *)&x->xsave_hdr.xstate_bv);
28608+ err |= __get_user(xstate_bv, (__u32 __user *)&x->xsave_hdr.xstate_bv);
28609
28610 /*
28611 * For legacy compatible, we always set FP/SSE bits in the bit
28612@@ -194,7 +194,7 @@ static inline int save_xstate_epilog(void __user *buf, int ia32_frame)
28613 */
28614 xstate_bv |= XSTATE_FPSSE;
28615
28616- err |= __put_user(xstate_bv, (__u32 *)&x->xsave_hdr.xstate_bv);
28617+ err |= __put_user(xstate_bv, (__u32 __user *)&x->xsave_hdr.xstate_bv);
28618
28619 return err;
28620 }
28621@@ -203,6 +203,7 @@ static inline int save_user_xstate(struct xsave_struct __user *buf)
28622 {
28623 int err;
28624
28625+ buf = (struct xsave_struct __user *)____m(buf);
28626 if (use_xsave())
28627 err = xsave_user(buf);
28628 else if (use_fxsr())
28629@@ -313,6 +314,7 @@ sanitize_restored_xstate(struct task_struct *tsk,
28630 */
28631 static inline int restore_user_xstate(void __user *buf, u64 xbv, int fx_only)
28632 {
28633+ buf = (void __user *)____m(buf);
28634 if (use_xsave()) {
28635 if ((unsigned long)buf % 64 || fx_only) {
28636 u64 init_bv = pcntxt_mask & ~XSTATE_FPSSE;
28637diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
28638index 8a80737..bac4961 100644
28639--- a/arch/x86/kvm/cpuid.c
28640+++ b/arch/x86/kvm/cpuid.c
28641@@ -182,15 +182,20 @@ int kvm_vcpu_ioctl_set_cpuid2(struct kvm_vcpu *vcpu,
28642 struct kvm_cpuid2 *cpuid,
28643 struct kvm_cpuid_entry2 __user *entries)
28644 {
28645- int r;
28646+ int r, i;
28647
28648 r = -E2BIG;
28649 if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
28650 goto out;
28651 r = -EFAULT;
28652- if (copy_from_user(&vcpu->arch.cpuid_entries, entries,
28653- cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
28654+ if (!access_ok(VERIFY_READ, entries, cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
28655 goto out;
28656+ for (i = 0; i < cpuid->nent; ++i) {
28657+ struct kvm_cpuid_entry2 cpuid_entry;
28658+ if (__copy_from_user(&cpuid_entry, entries + i, sizeof(cpuid_entry)))
28659+ goto out;
28660+ vcpu->arch.cpuid_entries[i] = cpuid_entry;
28661+ }
28662 vcpu->arch.cpuid_nent = cpuid->nent;
28663 kvm_apic_set_version(vcpu);
28664 kvm_x86_ops->cpuid_update(vcpu);
28665@@ -203,15 +208,19 @@ int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu,
28666 struct kvm_cpuid2 *cpuid,
28667 struct kvm_cpuid_entry2 __user *entries)
28668 {
28669- int r;
28670+ int r, i;
28671
28672 r = -E2BIG;
28673 if (cpuid->nent < vcpu->arch.cpuid_nent)
28674 goto out;
28675 r = -EFAULT;
28676- if (copy_to_user(entries, &vcpu->arch.cpuid_entries,
28677- vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
28678+ if (!access_ok(VERIFY_WRITE, entries, vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
28679 goto out;
28680+ for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
28681+ struct kvm_cpuid_entry2 cpuid_entry = vcpu->arch.cpuid_entries[i];
28682+ if (__copy_to_user(entries + i, &cpuid_entry, sizeof(cpuid_entry)))
28683+ goto out;
28684+ }
28685 return 0;
28686
28687 out:
28688diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
28689index 106c015..2db7161 100644
28690--- a/arch/x86/kvm/emulate.c
28691+++ b/arch/x86/kvm/emulate.c
28692@@ -3572,7 +3572,7 @@ static int check_cr_write(struct x86_emulate_ctxt *ctxt)
28693 int cr = ctxt->modrm_reg;
28694 u64 efer = 0;
28695
28696- static u64 cr_reserved_bits[] = {
28697+ static const u64 cr_reserved_bits[] = {
28698 0xffffffff00000000ULL,
28699 0, 0, 0, /* CR3 checked later */
28700 CR4_RESERVED_BITS,
28701diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
28702index 4ee827d..a14eff9 100644
28703--- a/arch/x86/kvm/lapic.c
28704+++ b/arch/x86/kvm/lapic.c
28705@@ -56,7 +56,7 @@
28706 #define APIC_BUS_CYCLE_NS 1
28707
28708 /* #define apic_debug(fmt,arg...) printk(KERN_WARNING fmt,##arg) */
28709-#define apic_debug(fmt, arg...)
28710+#define apic_debug(fmt, arg...) do {} while (0)
28711
28712 #define APIC_LVT_NUM 6
28713 /* 14 is the version for Xeon and Pentium 8.4.8*/
28714diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
28715index fd49c86..77e1aa0 100644
28716--- a/arch/x86/kvm/paging_tmpl.h
28717+++ b/arch/x86/kvm/paging_tmpl.h
28718@@ -343,7 +343,7 @@ retry_walk:
28719 if (unlikely(kvm_is_error_hva(host_addr)))
28720 goto error;
28721
28722- ptep_user = (pt_element_t __user *)((void *)host_addr + offset);
28723+ ptep_user = (pt_element_t __force_user *)((void *)host_addr + offset);
28724 if (unlikely(__copy_from_user(&pte, ptep_user, sizeof(pte))))
28725 goto error;
28726 walker->ptep_user[walker->level - 1] = ptep_user;
28727diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
28728index cc618c8..3f72f76 100644
28729--- a/arch/x86/kvm/svm.c
28730+++ b/arch/x86/kvm/svm.c
28731@@ -3568,7 +3568,11 @@ static void reload_tss(struct kvm_vcpu *vcpu)
28732 int cpu = raw_smp_processor_id();
28733
28734 struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
28735+
28736+ pax_open_kernel();
28737 sd->tss_desc->type = 9; /* available 32/64-bit TSS */
28738+ pax_close_kernel();
28739+
28740 load_TR_desc();
28741 }
28742
28743@@ -3964,6 +3968,10 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
28744 #endif
28745 #endif
28746
28747+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
28748+ __set_fs(current_thread_info()->addr_limit);
28749+#endif
28750+
28751 reload_tss(vcpu);
28752
28753 local_irq_disable();
28754diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
28755index a60bd3a..748e856 100644
28756--- a/arch/x86/kvm/vmx.c
28757+++ b/arch/x86/kvm/vmx.c
28758@@ -1440,12 +1440,12 @@ static void vmcs_write64(unsigned long field, u64 value)
28759 #endif
28760 }
28761
28762-static void vmcs_clear_bits(unsigned long field, u32 mask)
28763+static void vmcs_clear_bits(unsigned long field, unsigned long mask)
28764 {
28765 vmcs_writel(field, vmcs_readl(field) & ~mask);
28766 }
28767
28768-static void vmcs_set_bits(unsigned long field, u32 mask)
28769+static void vmcs_set_bits(unsigned long field, unsigned long mask)
28770 {
28771 vmcs_writel(field, vmcs_readl(field) | mask);
28772 }
28773@@ -1705,7 +1705,11 @@ static void reload_tss(void)
28774 struct desc_struct *descs;
28775
28776 descs = (void *)gdt->address;
28777+
28778+ pax_open_kernel();
28779 descs[GDT_ENTRY_TSS].type = 9; /* available TSS */
28780+ pax_close_kernel();
28781+
28782 load_TR_desc();
28783 }
28784
28785@@ -1941,6 +1945,10 @@ static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
28786 vmcs_writel(HOST_TR_BASE, kvm_read_tr_base()); /* 22.2.4 */
28787 vmcs_writel(HOST_GDTR_BASE, gdt->address); /* 22.2.4 */
28788
28789+#ifdef CONFIG_PAX_PER_CPU_PGD
28790+ vmcs_writel(HOST_CR3, read_cr3()); /* 22.2.3 FIXME: shadow tables */
28791+#endif
28792+
28793 rdmsrl(MSR_IA32_SYSENTER_ESP, sysenter_esp);
28794 vmcs_writel(HOST_IA32_SYSENTER_ESP, sysenter_esp); /* 22.2.3 */
28795 vmx->loaded_vmcs->cpu = cpu;
28796@@ -2233,7 +2241,7 @@ static void setup_msrs(struct vcpu_vmx *vmx)
28797 * reads and returns guest's timestamp counter "register"
28798 * guest_tsc = host_tsc + tsc_offset -- 21.3
28799 */
28800-static u64 guest_read_tsc(void)
28801+static u64 __intentional_overflow(-1) guest_read_tsc(void)
28802 {
28803 u64 host_tsc, tsc_offset;
28804
28805@@ -4466,7 +4474,10 @@ static void vmx_set_constant_host_state(struct vcpu_vmx *vmx)
28806 unsigned long cr4;
28807
28808 vmcs_writel(HOST_CR0, read_cr0() & ~X86_CR0_TS); /* 22.2.3 */
28809+
28810+#ifndef CONFIG_PAX_PER_CPU_PGD
28811 vmcs_writel(HOST_CR3, read_cr3()); /* 22.2.3 FIXME: shadow tables */
28812+#endif
28813
28814 /* Save the most likely value for this task's CR4 in the VMCS. */
28815 cr4 = cr4_read_shadow();
28816@@ -4493,7 +4504,7 @@ static void vmx_set_constant_host_state(struct vcpu_vmx *vmx)
28817 vmcs_writel(HOST_IDTR_BASE, dt.address); /* 22.2.4 */
28818 vmx->host_idt_base = dt.address;
28819
28820- vmcs_writel(HOST_RIP, vmx_return); /* 22.2.5 */
28821+ vmcs_writel(HOST_RIP, ktla_ktva(vmx_return)); /* 22.2.5 */
28822
28823 rdmsr(MSR_IA32_SYSENTER_CS, low32, high32);
28824 vmcs_write32(HOST_IA32_SYSENTER_CS, low32);
28825@@ -6104,11 +6115,17 @@ static __init int hardware_setup(void)
28826 * page upon invalidation. No need to do anything if not
28827 * using the APIC_ACCESS_ADDR VMCS field.
28828 */
28829- if (!flexpriority_enabled)
28830- kvm_x86_ops->set_apic_access_page_addr = NULL;
28831+ if (!flexpriority_enabled) {
28832+ pax_open_kernel();
28833+ *(void **)&kvm_x86_ops->set_apic_access_page_addr = NULL;
28834+ pax_close_kernel();
28835+ }
28836
28837- if (!cpu_has_vmx_tpr_shadow())
28838- kvm_x86_ops->update_cr8_intercept = NULL;
28839+ if (!cpu_has_vmx_tpr_shadow()) {
28840+ pax_open_kernel();
28841+ *(void **)&kvm_x86_ops->update_cr8_intercept = NULL;
28842+ pax_close_kernel();
28843+ }
28844
28845 if (enable_ept && !cpu_has_vmx_ept_2m_page())
28846 kvm_disable_largepages();
28847@@ -6119,14 +6136,16 @@ static __init int hardware_setup(void)
28848 if (!cpu_has_vmx_apicv())
28849 enable_apicv = 0;
28850
28851+ pax_open_kernel();
28852 if (enable_apicv)
28853- kvm_x86_ops->update_cr8_intercept = NULL;
28854+ *(void **)&kvm_x86_ops->update_cr8_intercept = NULL;
28855 else {
28856- kvm_x86_ops->hwapic_irr_update = NULL;
28857- kvm_x86_ops->hwapic_isr_update = NULL;
28858- kvm_x86_ops->deliver_posted_interrupt = NULL;
28859- kvm_x86_ops->sync_pir_to_irr = vmx_sync_pir_to_irr_dummy;
28860+ *(void **)&kvm_x86_ops->hwapic_irr_update = NULL;
28861+ *(void **)&kvm_x86_ops->hwapic_isr_update = NULL;
28862+ *(void **)&kvm_x86_ops->deliver_posted_interrupt = NULL;
28863+ *(void **)&kvm_x86_ops->sync_pir_to_irr = vmx_sync_pir_to_irr_dummy;
28864 }
28865+ pax_close_kernel();
28866
28867 vmx_disable_intercept_for_msr(MSR_FS_BASE, false);
28868 vmx_disable_intercept_for_msr(MSR_GS_BASE, false);
28869@@ -6179,10 +6198,12 @@ static __init int hardware_setup(void)
28870 enable_pml = 0;
28871
28872 if (!enable_pml) {
28873- kvm_x86_ops->slot_enable_log_dirty = NULL;
28874- kvm_x86_ops->slot_disable_log_dirty = NULL;
28875- kvm_x86_ops->flush_log_dirty = NULL;
28876- kvm_x86_ops->enable_log_dirty_pt_masked = NULL;
28877+ pax_open_kernel();
28878+ *(void **)&kvm_x86_ops->slot_enable_log_dirty = NULL;
28879+ *(void **)&kvm_x86_ops->slot_disable_log_dirty = NULL;
28880+ *(void **)&kvm_x86_ops->flush_log_dirty = NULL;
28881+ *(void **)&kvm_x86_ops->enable_log_dirty_pt_masked = NULL;
28882+ pax_close_kernel();
28883 }
28884
28885 return alloc_kvm_area();
28886@@ -8227,6 +8248,12 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
28887 "jmp 2f \n\t"
28888 "1: " __ex(ASM_VMX_VMRESUME) "\n\t"
28889 "2: "
28890+
28891+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
28892+ "ljmp %[cs],$3f\n\t"
28893+ "3: "
28894+#endif
28895+
28896 /* Save guest registers, load host registers, keep flags */
28897 "mov %0, %c[wordsize](%%" _ASM_SP ") \n\t"
28898 "pop %0 \n\t"
28899@@ -8279,6 +8306,11 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
28900 #endif
28901 [cr2]"i"(offsetof(struct vcpu_vmx, vcpu.arch.cr2)),
28902 [wordsize]"i"(sizeof(ulong))
28903+
28904+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
28905+ ,[cs]"i"(__KERNEL_CS)
28906+#endif
28907+
28908 : "cc", "memory"
28909 #ifdef CONFIG_X86_64
28910 , "rax", "rbx", "rdi", "rsi"
28911@@ -8292,7 +8324,7 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
28912 if (debugctlmsr)
28913 update_debugctlmsr(debugctlmsr);
28914
28915-#ifndef CONFIG_X86_64
28916+#ifdef CONFIG_X86_32
28917 /*
28918 * The sysexit path does not restore ds/es, so we must set them to
28919 * a reasonable value ourselves.
28920@@ -8301,8 +8333,18 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
28921 * may be executed in interrupt context, which saves and restore segments
28922 * around it, nullifying its effect.
28923 */
28924- loadsegment(ds, __USER_DS);
28925- loadsegment(es, __USER_DS);
28926+ loadsegment(ds, __KERNEL_DS);
28927+ loadsegment(es, __KERNEL_DS);
28928+ loadsegment(ss, __KERNEL_DS);
28929+
28930+#ifdef CONFIG_PAX_KERNEXEC
28931+ loadsegment(fs, __KERNEL_PERCPU);
28932+#endif
28933+
28934+#ifdef CONFIG_PAX_MEMORY_UDEREF
28935+ __set_fs(current_thread_info()->addr_limit);
28936+#endif
28937+
28938 #endif
28939
28940 vcpu->arch.regs_avail = ~((1 << VCPU_REGS_RIP) | (1 << VCPU_REGS_RSP)
28941diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
28942index e222ba5..6f0f2de 100644
28943--- a/arch/x86/kvm/x86.c
28944+++ b/arch/x86/kvm/x86.c
28945@@ -1897,8 +1897,8 @@ static int xen_hvm_config(struct kvm_vcpu *vcpu, u64 data)
28946 {
28947 struct kvm *kvm = vcpu->kvm;
28948 int lm = is_long_mode(vcpu);
28949- u8 *blob_addr = lm ? (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_64
28950- : (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_32;
28951+ u8 __user *blob_addr = lm ? (u8 __user *)(long)kvm->arch.xen_hvm_config.blob_addr_64
28952+ : (u8 __user *)(long)kvm->arch.xen_hvm_config.blob_addr_32;
28953 u8 blob_size = lm ? kvm->arch.xen_hvm_config.blob_size_64
28954 : kvm->arch.xen_hvm_config.blob_size_32;
28955 u32 page_num = data & ~PAGE_MASK;
28956@@ -2835,6 +2835,8 @@ long kvm_arch_dev_ioctl(struct file *filp,
28957 if (n < msr_list.nmsrs)
28958 goto out;
28959 r = -EFAULT;
28960+ if (num_msrs_to_save > ARRAY_SIZE(msrs_to_save))
28961+ goto out;
28962 if (copy_to_user(user_msr_list->indices, &msrs_to_save,
28963 num_msrs_to_save * sizeof(u32)))
28964 goto out;
28965@@ -5739,7 +5741,7 @@ static struct notifier_block pvclock_gtod_notifier = {
28966 };
28967 #endif
28968
28969-int kvm_arch_init(void *opaque)
28970+int kvm_arch_init(const void *opaque)
28971 {
28972 int r;
28973 struct kvm_x86_ops *ops = opaque;
28974diff --git a/arch/x86/lguest/boot.c b/arch/x86/lguest/boot.c
28975index ac4453d..1f43bf3 100644
28976--- a/arch/x86/lguest/boot.c
28977+++ b/arch/x86/lguest/boot.c
28978@@ -1340,9 +1340,10 @@ static __init int early_put_chars(u32 vtermno, const char *buf, int count)
28979 * Rebooting also tells the Host we're finished, but the RESTART flag tells the
28980 * Launcher to reboot us.
28981 */
28982-static void lguest_restart(char *reason)
28983+static __noreturn void lguest_restart(char *reason)
28984 {
28985 hcall(LHCALL_SHUTDOWN, __pa(reason), LGUEST_SHUTDOWN_RESTART, 0, 0);
28986+ BUG();
28987 }
28988
28989 /*G:050
28990diff --git a/arch/x86/lib/atomic64_386_32.S b/arch/x86/lib/atomic64_386_32.S
28991index 00933d5..3a64af9 100644
28992--- a/arch/x86/lib/atomic64_386_32.S
28993+++ b/arch/x86/lib/atomic64_386_32.S
28994@@ -48,6 +48,10 @@ BEGIN(read)
28995 movl (v), %eax
28996 movl 4(v), %edx
28997 RET_ENDP
28998+BEGIN(read_unchecked)
28999+ movl (v), %eax
29000+ movl 4(v), %edx
29001+RET_ENDP
29002 #undef v
29003
29004 #define v %esi
29005@@ -55,6 +59,10 @@ BEGIN(set)
29006 movl %ebx, (v)
29007 movl %ecx, 4(v)
29008 RET_ENDP
29009+BEGIN(set_unchecked)
29010+ movl %ebx, (v)
29011+ movl %ecx, 4(v)
29012+RET_ENDP
29013 #undef v
29014
29015 #define v %esi
29016@@ -70,6 +78,20 @@ RET_ENDP
29017 BEGIN(add)
29018 addl %eax, (v)
29019 adcl %edx, 4(v)
29020+
29021+#ifdef CONFIG_PAX_REFCOUNT
29022+ jno 0f
29023+ subl %eax, (v)
29024+ sbbl %edx, 4(v)
29025+ int $4
29026+0:
29027+ _ASM_EXTABLE(0b, 0b)
29028+#endif
29029+
29030+RET_ENDP
29031+BEGIN(add_unchecked)
29032+ addl %eax, (v)
29033+ adcl %edx, 4(v)
29034 RET_ENDP
29035 #undef v
29036
29037@@ -77,6 +99,24 @@ RET_ENDP
29038 BEGIN(add_return)
29039 addl (v), %eax
29040 adcl 4(v), %edx
29041+
29042+#ifdef CONFIG_PAX_REFCOUNT
29043+ into
29044+1234:
29045+ _ASM_EXTABLE(1234b, 2f)
29046+#endif
29047+
29048+ movl %eax, (v)
29049+ movl %edx, 4(v)
29050+
29051+#ifdef CONFIG_PAX_REFCOUNT
29052+2:
29053+#endif
29054+
29055+RET_ENDP
29056+BEGIN(add_return_unchecked)
29057+ addl (v), %eax
29058+ adcl 4(v), %edx
29059 movl %eax, (v)
29060 movl %edx, 4(v)
29061 RET_ENDP
29062@@ -86,6 +126,20 @@ RET_ENDP
29063 BEGIN(sub)
29064 subl %eax, (v)
29065 sbbl %edx, 4(v)
29066+
29067+#ifdef CONFIG_PAX_REFCOUNT
29068+ jno 0f
29069+ addl %eax, (v)
29070+ adcl %edx, 4(v)
29071+ int $4
29072+0:
29073+ _ASM_EXTABLE(0b, 0b)
29074+#endif
29075+
29076+RET_ENDP
29077+BEGIN(sub_unchecked)
29078+ subl %eax, (v)
29079+ sbbl %edx, 4(v)
29080 RET_ENDP
29081 #undef v
29082
29083@@ -96,6 +150,27 @@ BEGIN(sub_return)
29084 sbbl $0, %edx
29085 addl (v), %eax
29086 adcl 4(v), %edx
29087+
29088+#ifdef CONFIG_PAX_REFCOUNT
29089+ into
29090+1234:
29091+ _ASM_EXTABLE(1234b, 2f)
29092+#endif
29093+
29094+ movl %eax, (v)
29095+ movl %edx, 4(v)
29096+
29097+#ifdef CONFIG_PAX_REFCOUNT
29098+2:
29099+#endif
29100+
29101+RET_ENDP
29102+BEGIN(sub_return_unchecked)
29103+ negl %edx
29104+ negl %eax
29105+ sbbl $0, %edx
29106+ addl (v), %eax
29107+ adcl 4(v), %edx
29108 movl %eax, (v)
29109 movl %edx, 4(v)
29110 RET_ENDP
29111@@ -105,6 +180,20 @@ RET_ENDP
29112 BEGIN(inc)
29113 addl $1, (v)
29114 adcl $0, 4(v)
29115+
29116+#ifdef CONFIG_PAX_REFCOUNT
29117+ jno 0f
29118+ subl $1, (v)
29119+ sbbl $0, 4(v)
29120+ int $4
29121+0:
29122+ _ASM_EXTABLE(0b, 0b)
29123+#endif
29124+
29125+RET_ENDP
29126+BEGIN(inc_unchecked)
29127+ addl $1, (v)
29128+ adcl $0, 4(v)
29129 RET_ENDP
29130 #undef v
29131
29132@@ -114,6 +203,26 @@ BEGIN(inc_return)
29133 movl 4(v), %edx
29134 addl $1, %eax
29135 adcl $0, %edx
29136+
29137+#ifdef CONFIG_PAX_REFCOUNT
29138+ into
29139+1234:
29140+ _ASM_EXTABLE(1234b, 2f)
29141+#endif
29142+
29143+ movl %eax, (v)
29144+ movl %edx, 4(v)
29145+
29146+#ifdef CONFIG_PAX_REFCOUNT
29147+2:
29148+#endif
29149+
29150+RET_ENDP
29151+BEGIN(inc_return_unchecked)
29152+ movl (v), %eax
29153+ movl 4(v), %edx
29154+ addl $1, %eax
29155+ adcl $0, %edx
29156 movl %eax, (v)
29157 movl %edx, 4(v)
29158 RET_ENDP
29159@@ -123,6 +232,20 @@ RET_ENDP
29160 BEGIN(dec)
29161 subl $1, (v)
29162 sbbl $0, 4(v)
29163+
29164+#ifdef CONFIG_PAX_REFCOUNT
29165+ jno 0f
29166+ addl $1, (v)
29167+ adcl $0, 4(v)
29168+ int $4
29169+0:
29170+ _ASM_EXTABLE(0b, 0b)
29171+#endif
29172+
29173+RET_ENDP
29174+BEGIN(dec_unchecked)
29175+ subl $1, (v)
29176+ sbbl $0, 4(v)
29177 RET_ENDP
29178 #undef v
29179
29180@@ -132,6 +255,26 @@ BEGIN(dec_return)
29181 movl 4(v), %edx
29182 subl $1, %eax
29183 sbbl $0, %edx
29184+
29185+#ifdef CONFIG_PAX_REFCOUNT
29186+ into
29187+1234:
29188+ _ASM_EXTABLE(1234b, 2f)
29189+#endif
29190+
29191+ movl %eax, (v)
29192+ movl %edx, 4(v)
29193+
29194+#ifdef CONFIG_PAX_REFCOUNT
29195+2:
29196+#endif
29197+
29198+RET_ENDP
29199+BEGIN(dec_return_unchecked)
29200+ movl (v), %eax
29201+ movl 4(v), %edx
29202+ subl $1, %eax
29203+ sbbl $0, %edx
29204 movl %eax, (v)
29205 movl %edx, 4(v)
29206 RET_ENDP
29207@@ -143,6 +286,13 @@ BEGIN(add_unless)
29208 adcl %edx, %edi
29209 addl (v), %eax
29210 adcl 4(v), %edx
29211+
29212+#ifdef CONFIG_PAX_REFCOUNT
29213+ into
29214+1234:
29215+ _ASM_EXTABLE(1234b, 2f)
29216+#endif
29217+
29218 cmpl %eax, %ecx
29219 je 3f
29220 1:
29221@@ -168,6 +318,13 @@ BEGIN(inc_not_zero)
29222 1:
29223 addl $1, %eax
29224 adcl $0, %edx
29225+
29226+#ifdef CONFIG_PAX_REFCOUNT
29227+ into
29228+1234:
29229+ _ASM_EXTABLE(1234b, 2f)
29230+#endif
29231+
29232 movl %eax, (v)
29233 movl %edx, 4(v)
29234 movl $1, %eax
29235@@ -186,6 +343,13 @@ BEGIN(dec_if_positive)
29236 movl 4(v), %edx
29237 subl $1, %eax
29238 sbbl $0, %edx
29239+
29240+#ifdef CONFIG_PAX_REFCOUNT
29241+ into
29242+1234:
29243+ _ASM_EXTABLE(1234b, 1f)
29244+#endif
29245+
29246 js 1f
29247 movl %eax, (v)
29248 movl %edx, 4(v)
29249diff --git a/arch/x86/lib/atomic64_cx8_32.S b/arch/x86/lib/atomic64_cx8_32.S
29250index f5cc9eb..51fa319 100644
29251--- a/arch/x86/lib/atomic64_cx8_32.S
29252+++ b/arch/x86/lib/atomic64_cx8_32.S
29253@@ -35,10 +35,20 @@ ENTRY(atomic64_read_cx8)
29254 CFI_STARTPROC
29255
29256 read64 %ecx
29257+ pax_force_retaddr
29258 ret
29259 CFI_ENDPROC
29260 ENDPROC(atomic64_read_cx8)
29261
29262+ENTRY(atomic64_read_unchecked_cx8)
29263+ CFI_STARTPROC
29264+
29265+ read64 %ecx
29266+ pax_force_retaddr
29267+ ret
29268+ CFI_ENDPROC
29269+ENDPROC(atomic64_read_unchecked_cx8)
29270+
29271 ENTRY(atomic64_set_cx8)
29272 CFI_STARTPROC
29273
29274@@ -48,10 +58,25 @@ ENTRY(atomic64_set_cx8)
29275 cmpxchg8b (%esi)
29276 jne 1b
29277
29278+ pax_force_retaddr
29279 ret
29280 CFI_ENDPROC
29281 ENDPROC(atomic64_set_cx8)
29282
29283+ENTRY(atomic64_set_unchecked_cx8)
29284+ CFI_STARTPROC
29285+
29286+1:
29287+/* we don't need LOCK_PREFIX since aligned 64-bit writes
29288+ * are atomic on 586 and newer */
29289+ cmpxchg8b (%esi)
29290+ jne 1b
29291+
29292+ pax_force_retaddr
29293+ ret
29294+ CFI_ENDPROC
29295+ENDPROC(atomic64_set_unchecked_cx8)
29296+
29297 ENTRY(atomic64_xchg_cx8)
29298 CFI_STARTPROC
29299
29300@@ -60,12 +85,13 @@ ENTRY(atomic64_xchg_cx8)
29301 cmpxchg8b (%esi)
29302 jne 1b
29303
29304+ pax_force_retaddr
29305 ret
29306 CFI_ENDPROC
29307 ENDPROC(atomic64_xchg_cx8)
29308
29309-.macro addsub_return func ins insc
29310-ENTRY(atomic64_\func\()_return_cx8)
29311+.macro addsub_return func ins insc unchecked=""
29312+ENTRY(atomic64_\func\()_return\unchecked\()_cx8)
29313 CFI_STARTPROC
29314 SAVE ebp
29315 SAVE ebx
29316@@ -82,27 +108,44 @@ ENTRY(atomic64_\func\()_return_cx8)
29317 movl %edx, %ecx
29318 \ins\()l %esi, %ebx
29319 \insc\()l %edi, %ecx
29320+
29321+.ifb \unchecked
29322+#ifdef CONFIG_PAX_REFCOUNT
29323+ into
29324+2:
29325+ _ASM_EXTABLE(2b, 3f)
29326+#endif
29327+.endif
29328+
29329 LOCK_PREFIX
29330 cmpxchg8b (%ebp)
29331 jne 1b
29332-
29333-10:
29334 movl %ebx, %eax
29335 movl %ecx, %edx
29336+
29337+.ifb \unchecked
29338+#ifdef CONFIG_PAX_REFCOUNT
29339+3:
29340+#endif
29341+.endif
29342+
29343 RESTORE edi
29344 RESTORE esi
29345 RESTORE ebx
29346 RESTORE ebp
29347+ pax_force_retaddr
29348 ret
29349 CFI_ENDPROC
29350-ENDPROC(atomic64_\func\()_return_cx8)
29351+ENDPROC(atomic64_\func\()_return\unchecked\()_cx8)
29352 .endm
29353
29354 addsub_return add add adc
29355 addsub_return sub sub sbb
29356+addsub_return add add adc _unchecked
29357+addsub_return sub sub sbb _unchecked
29358
29359-.macro incdec_return func ins insc
29360-ENTRY(atomic64_\func\()_return_cx8)
29361+.macro incdec_return func ins insc unchecked=""
29362+ENTRY(atomic64_\func\()_return\unchecked\()_cx8)
29363 CFI_STARTPROC
29364 SAVE ebx
29365
29366@@ -112,21 +155,39 @@ ENTRY(atomic64_\func\()_return_cx8)
29367 movl %edx, %ecx
29368 \ins\()l $1, %ebx
29369 \insc\()l $0, %ecx
29370+
29371+.ifb \unchecked
29372+#ifdef CONFIG_PAX_REFCOUNT
29373+ into
29374+2:
29375+ _ASM_EXTABLE(2b, 3f)
29376+#endif
29377+.endif
29378+
29379 LOCK_PREFIX
29380 cmpxchg8b (%esi)
29381 jne 1b
29382
29383-10:
29384 movl %ebx, %eax
29385 movl %ecx, %edx
29386+
29387+.ifb \unchecked
29388+#ifdef CONFIG_PAX_REFCOUNT
29389+3:
29390+#endif
29391+.endif
29392+
29393 RESTORE ebx
29394+ pax_force_retaddr
29395 ret
29396 CFI_ENDPROC
29397-ENDPROC(atomic64_\func\()_return_cx8)
29398+ENDPROC(atomic64_\func\()_return\unchecked\()_cx8)
29399 .endm
29400
29401 incdec_return inc add adc
29402 incdec_return dec sub sbb
29403+incdec_return inc add adc _unchecked
29404+incdec_return dec sub sbb _unchecked
29405
29406 ENTRY(atomic64_dec_if_positive_cx8)
29407 CFI_STARTPROC
29408@@ -138,6 +199,13 @@ ENTRY(atomic64_dec_if_positive_cx8)
29409 movl %edx, %ecx
29410 subl $1, %ebx
29411 sbb $0, %ecx
29412+
29413+#ifdef CONFIG_PAX_REFCOUNT
29414+ into
29415+1234:
29416+ _ASM_EXTABLE(1234b, 2f)
29417+#endif
29418+
29419 js 2f
29420 LOCK_PREFIX
29421 cmpxchg8b (%esi)
29422@@ -147,6 +215,7 @@ ENTRY(atomic64_dec_if_positive_cx8)
29423 movl %ebx, %eax
29424 movl %ecx, %edx
29425 RESTORE ebx
29426+ pax_force_retaddr
29427 ret
29428 CFI_ENDPROC
29429 ENDPROC(atomic64_dec_if_positive_cx8)
29430@@ -171,6 +240,13 @@ ENTRY(atomic64_add_unless_cx8)
29431 movl %edx, %ecx
29432 addl %ebp, %ebx
29433 adcl %edi, %ecx
29434+
29435+#ifdef CONFIG_PAX_REFCOUNT
29436+ into
29437+1234:
29438+ _ASM_EXTABLE(1234b, 3f)
29439+#endif
29440+
29441 LOCK_PREFIX
29442 cmpxchg8b (%esi)
29443 jne 1b
29444@@ -181,6 +257,7 @@ ENTRY(atomic64_add_unless_cx8)
29445 CFI_ADJUST_CFA_OFFSET -8
29446 RESTORE ebx
29447 RESTORE ebp
29448+ pax_force_retaddr
29449 ret
29450 4:
29451 cmpl %edx, 4(%esp)
29452@@ -203,6 +280,13 @@ ENTRY(atomic64_inc_not_zero_cx8)
29453 xorl %ecx, %ecx
29454 addl $1, %ebx
29455 adcl %edx, %ecx
29456+
29457+#ifdef CONFIG_PAX_REFCOUNT
29458+ into
29459+1234:
29460+ _ASM_EXTABLE(1234b, 3f)
29461+#endif
29462+
29463 LOCK_PREFIX
29464 cmpxchg8b (%esi)
29465 jne 1b
29466@@ -210,6 +294,7 @@ ENTRY(atomic64_inc_not_zero_cx8)
29467 movl $1, %eax
29468 3:
29469 RESTORE ebx
29470+ pax_force_retaddr
29471 ret
29472 CFI_ENDPROC
29473 ENDPROC(atomic64_inc_not_zero_cx8)
29474diff --git a/arch/x86/lib/checksum_32.S b/arch/x86/lib/checksum_32.S
29475index e78b8eee..7e173a8 100644
29476--- a/arch/x86/lib/checksum_32.S
29477+++ b/arch/x86/lib/checksum_32.S
29478@@ -29,7 +29,8 @@
29479 #include <asm/dwarf2.h>
29480 #include <asm/errno.h>
29481 #include <asm/asm.h>
29482-
29483+#include <asm/segment.h>
29484+
29485 /*
29486 * computes a partial checksum, e.g. for TCP/UDP fragments
29487 */
29488@@ -293,9 +294,24 @@ unsigned int csum_partial_copy_generic (const char *src, char *dst,
29489
29490 #define ARGBASE 16
29491 #define FP 12
29492-
29493-ENTRY(csum_partial_copy_generic)
29494+
29495+ENTRY(csum_partial_copy_generic_to_user)
29496 CFI_STARTPROC
29497+
29498+#ifdef CONFIG_PAX_MEMORY_UDEREF
29499+ pushl_cfi %gs
29500+ popl_cfi %es
29501+ jmp csum_partial_copy_generic
29502+#endif
29503+
29504+ENTRY(csum_partial_copy_generic_from_user)
29505+
29506+#ifdef CONFIG_PAX_MEMORY_UDEREF
29507+ pushl_cfi %gs
29508+ popl_cfi %ds
29509+#endif
29510+
29511+ENTRY(csum_partial_copy_generic)
29512 subl $4,%esp
29513 CFI_ADJUST_CFA_OFFSET 4
29514 pushl_cfi %edi
29515@@ -317,7 +333,7 @@ ENTRY(csum_partial_copy_generic)
29516 jmp 4f
29517 SRC(1: movw (%esi), %bx )
29518 addl $2, %esi
29519-DST( movw %bx, (%edi) )
29520+DST( movw %bx, %es:(%edi) )
29521 addl $2, %edi
29522 addw %bx, %ax
29523 adcl $0, %eax
29524@@ -329,30 +345,30 @@ DST( movw %bx, (%edi) )
29525 SRC(1: movl (%esi), %ebx )
29526 SRC( movl 4(%esi), %edx )
29527 adcl %ebx, %eax
29528-DST( movl %ebx, (%edi) )
29529+DST( movl %ebx, %es:(%edi) )
29530 adcl %edx, %eax
29531-DST( movl %edx, 4(%edi) )
29532+DST( movl %edx, %es:4(%edi) )
29533
29534 SRC( movl 8(%esi), %ebx )
29535 SRC( movl 12(%esi), %edx )
29536 adcl %ebx, %eax
29537-DST( movl %ebx, 8(%edi) )
29538+DST( movl %ebx, %es:8(%edi) )
29539 adcl %edx, %eax
29540-DST( movl %edx, 12(%edi) )
29541+DST( movl %edx, %es:12(%edi) )
29542
29543 SRC( movl 16(%esi), %ebx )
29544 SRC( movl 20(%esi), %edx )
29545 adcl %ebx, %eax
29546-DST( movl %ebx, 16(%edi) )
29547+DST( movl %ebx, %es:16(%edi) )
29548 adcl %edx, %eax
29549-DST( movl %edx, 20(%edi) )
29550+DST( movl %edx, %es:20(%edi) )
29551
29552 SRC( movl 24(%esi), %ebx )
29553 SRC( movl 28(%esi), %edx )
29554 adcl %ebx, %eax
29555-DST( movl %ebx, 24(%edi) )
29556+DST( movl %ebx, %es:24(%edi) )
29557 adcl %edx, %eax
29558-DST( movl %edx, 28(%edi) )
29559+DST( movl %edx, %es:28(%edi) )
29560
29561 lea 32(%esi), %esi
29562 lea 32(%edi), %edi
29563@@ -366,7 +382,7 @@ DST( movl %edx, 28(%edi) )
29564 shrl $2, %edx # This clears CF
29565 SRC(3: movl (%esi), %ebx )
29566 adcl %ebx, %eax
29567-DST( movl %ebx, (%edi) )
29568+DST( movl %ebx, %es:(%edi) )
29569 lea 4(%esi), %esi
29570 lea 4(%edi), %edi
29571 dec %edx
29572@@ -378,12 +394,12 @@ DST( movl %ebx, (%edi) )
29573 jb 5f
29574 SRC( movw (%esi), %cx )
29575 leal 2(%esi), %esi
29576-DST( movw %cx, (%edi) )
29577+DST( movw %cx, %es:(%edi) )
29578 leal 2(%edi), %edi
29579 je 6f
29580 shll $16,%ecx
29581 SRC(5: movb (%esi), %cl )
29582-DST( movb %cl, (%edi) )
29583+DST( movb %cl, %es:(%edi) )
29584 6: addl %ecx, %eax
29585 adcl $0, %eax
29586 7:
29587@@ -394,7 +410,7 @@ DST( movb %cl, (%edi) )
29588
29589 6001:
29590 movl ARGBASE+20(%esp), %ebx # src_err_ptr
29591- movl $-EFAULT, (%ebx)
29592+ movl $-EFAULT, %ss:(%ebx)
29593
29594 # zero the complete destination - computing the rest
29595 # is too much work
29596@@ -407,11 +423,15 @@ DST( movb %cl, (%edi) )
29597
29598 6002:
29599 movl ARGBASE+24(%esp), %ebx # dst_err_ptr
29600- movl $-EFAULT,(%ebx)
29601+ movl $-EFAULT,%ss:(%ebx)
29602 jmp 5000b
29603
29604 .previous
29605
29606+ pushl_cfi %ss
29607+ popl_cfi %ds
29608+ pushl_cfi %ss
29609+ popl_cfi %es
29610 popl_cfi %ebx
29611 CFI_RESTORE ebx
29612 popl_cfi %esi
29613@@ -421,26 +441,43 @@ DST( movb %cl, (%edi) )
29614 popl_cfi %ecx # equivalent to addl $4,%esp
29615 ret
29616 CFI_ENDPROC
29617-ENDPROC(csum_partial_copy_generic)
29618+ENDPROC(csum_partial_copy_generic_to_user)
29619
29620 #else
29621
29622 /* Version for PentiumII/PPro */
29623
29624 #define ROUND1(x) \
29625+ nop; nop; nop; \
29626 SRC(movl x(%esi), %ebx ) ; \
29627 addl %ebx, %eax ; \
29628- DST(movl %ebx, x(%edi) ) ;
29629+ DST(movl %ebx, %es:x(%edi)) ;
29630
29631 #define ROUND(x) \
29632+ nop; nop; nop; \
29633 SRC(movl x(%esi), %ebx ) ; \
29634 adcl %ebx, %eax ; \
29635- DST(movl %ebx, x(%edi) ) ;
29636+ DST(movl %ebx, %es:x(%edi)) ;
29637
29638 #define ARGBASE 12
29639-
29640-ENTRY(csum_partial_copy_generic)
29641+
29642+ENTRY(csum_partial_copy_generic_to_user)
29643 CFI_STARTPROC
29644+
29645+#ifdef CONFIG_PAX_MEMORY_UDEREF
29646+ pushl_cfi %gs
29647+ popl_cfi %es
29648+ jmp csum_partial_copy_generic
29649+#endif
29650+
29651+ENTRY(csum_partial_copy_generic_from_user)
29652+
29653+#ifdef CONFIG_PAX_MEMORY_UDEREF
29654+ pushl_cfi %gs
29655+ popl_cfi %ds
29656+#endif
29657+
29658+ENTRY(csum_partial_copy_generic)
29659 pushl_cfi %ebx
29660 CFI_REL_OFFSET ebx, 0
29661 pushl_cfi %edi
29662@@ -461,7 +498,7 @@ ENTRY(csum_partial_copy_generic)
29663 subl %ebx, %edi
29664 lea -1(%esi),%edx
29665 andl $-32,%edx
29666- lea 3f(%ebx,%ebx), %ebx
29667+ lea 3f(%ebx,%ebx,2), %ebx
29668 testl %esi, %esi
29669 jmp *%ebx
29670 1: addl $64,%esi
29671@@ -482,19 +519,19 @@ ENTRY(csum_partial_copy_generic)
29672 jb 5f
29673 SRC( movw (%esi), %dx )
29674 leal 2(%esi), %esi
29675-DST( movw %dx, (%edi) )
29676+DST( movw %dx, %es:(%edi) )
29677 leal 2(%edi), %edi
29678 je 6f
29679 shll $16,%edx
29680 5:
29681 SRC( movb (%esi), %dl )
29682-DST( movb %dl, (%edi) )
29683+DST( movb %dl, %es:(%edi) )
29684 6: addl %edx, %eax
29685 adcl $0, %eax
29686 7:
29687 .section .fixup, "ax"
29688 6001: movl ARGBASE+20(%esp), %ebx # src_err_ptr
29689- movl $-EFAULT, (%ebx)
29690+ movl $-EFAULT, %ss:(%ebx)
29691 # zero the complete destination (computing the rest is too much work)
29692 movl ARGBASE+8(%esp),%edi # dst
29693 movl ARGBASE+12(%esp),%ecx # len
29694@@ -502,10 +539,17 @@ DST( movb %dl, (%edi) )
29695 rep; stosb
29696 jmp 7b
29697 6002: movl ARGBASE+24(%esp), %ebx # dst_err_ptr
29698- movl $-EFAULT, (%ebx)
29699+ movl $-EFAULT, %ss:(%ebx)
29700 jmp 7b
29701 .previous
29702
29703+#ifdef CONFIG_PAX_MEMORY_UDEREF
29704+ pushl_cfi %ss
29705+ popl_cfi %ds
29706+ pushl_cfi %ss
29707+ popl_cfi %es
29708+#endif
29709+
29710 popl_cfi %esi
29711 CFI_RESTORE esi
29712 popl_cfi %edi
29713@@ -514,7 +558,7 @@ DST( movb %dl, (%edi) )
29714 CFI_RESTORE ebx
29715 ret
29716 CFI_ENDPROC
29717-ENDPROC(csum_partial_copy_generic)
29718+ENDPROC(csum_partial_copy_generic_to_user)
29719
29720 #undef ROUND
29721 #undef ROUND1
29722diff --git a/arch/x86/lib/clear_page_64.S b/arch/x86/lib/clear_page_64.S
29723index f2145cf..cea889d 100644
29724--- a/arch/x86/lib/clear_page_64.S
29725+++ b/arch/x86/lib/clear_page_64.S
29726@@ -11,6 +11,7 @@ ENTRY(clear_page_c)
29727 movl $4096/8,%ecx
29728 xorl %eax,%eax
29729 rep stosq
29730+ pax_force_retaddr
29731 ret
29732 CFI_ENDPROC
29733 ENDPROC(clear_page_c)
29734@@ -20,6 +21,7 @@ ENTRY(clear_page_c_e)
29735 movl $4096,%ecx
29736 xorl %eax,%eax
29737 rep stosb
29738+ pax_force_retaddr
29739 ret
29740 CFI_ENDPROC
29741 ENDPROC(clear_page_c_e)
29742@@ -43,6 +45,7 @@ ENTRY(clear_page)
29743 leaq 64(%rdi),%rdi
29744 jnz .Lloop
29745 nop
29746+ pax_force_retaddr
29747 ret
29748 CFI_ENDPROC
29749 .Lclear_page_end:
29750@@ -58,7 +61,7 @@ ENDPROC(clear_page)
29751
29752 #include <asm/cpufeature.h>
29753
29754- .section .altinstr_replacement,"ax"
29755+ .section .altinstr_replacement,"a"
29756 1: .byte 0xeb /* jmp <disp8> */
29757 .byte (clear_page_c - clear_page) - (2f - 1b) /* offset */
29758 2: .byte 0xeb /* jmp <disp8> */
29759diff --git a/arch/x86/lib/cmpxchg16b_emu.S b/arch/x86/lib/cmpxchg16b_emu.S
29760index 40a1725..5d12ac4 100644
29761--- a/arch/x86/lib/cmpxchg16b_emu.S
29762+++ b/arch/x86/lib/cmpxchg16b_emu.S
29763@@ -8,6 +8,7 @@
29764 #include <linux/linkage.h>
29765 #include <asm/dwarf2.h>
29766 #include <asm/percpu.h>
29767+#include <asm/alternative-asm.h>
29768
29769 .text
29770
29771@@ -46,12 +47,14 @@ CFI_STARTPROC
29772 CFI_REMEMBER_STATE
29773 popfq_cfi
29774 mov $1, %al
29775+ pax_force_retaddr
29776 ret
29777
29778 CFI_RESTORE_STATE
29779 .Lnot_same:
29780 popfq_cfi
29781 xor %al,%al
29782+ pax_force_retaddr
29783 ret
29784
29785 CFI_ENDPROC
29786diff --git a/arch/x86/lib/copy_page_64.S b/arch/x86/lib/copy_page_64.S
29787index 176cca6..e0d658e 100644
29788--- a/arch/x86/lib/copy_page_64.S
29789+++ b/arch/x86/lib/copy_page_64.S
29790@@ -9,6 +9,7 @@ copy_page_rep:
29791 CFI_STARTPROC
29792 movl $4096/8, %ecx
29793 rep movsq
29794+ pax_force_retaddr
29795 ret
29796 CFI_ENDPROC
29797 ENDPROC(copy_page_rep)
29798@@ -24,8 +25,8 @@ ENTRY(copy_page)
29799 CFI_ADJUST_CFA_OFFSET 2*8
29800 movq %rbx, (%rsp)
29801 CFI_REL_OFFSET rbx, 0
29802- movq %r12, 1*8(%rsp)
29803- CFI_REL_OFFSET r12, 1*8
29804+ movq %r13, 1*8(%rsp)
29805+ CFI_REL_OFFSET r13, 1*8
29806
29807 movl $(4096/64)-5, %ecx
29808 .p2align 4
29809@@ -38,7 +39,7 @@ ENTRY(copy_page)
29810 movq 0x8*4(%rsi), %r9
29811 movq 0x8*5(%rsi), %r10
29812 movq 0x8*6(%rsi), %r11
29813- movq 0x8*7(%rsi), %r12
29814+ movq 0x8*7(%rsi), %r13
29815
29816 prefetcht0 5*64(%rsi)
29817
29818@@ -49,7 +50,7 @@ ENTRY(copy_page)
29819 movq %r9, 0x8*4(%rdi)
29820 movq %r10, 0x8*5(%rdi)
29821 movq %r11, 0x8*6(%rdi)
29822- movq %r12, 0x8*7(%rdi)
29823+ movq %r13, 0x8*7(%rdi)
29824
29825 leaq 64 (%rsi), %rsi
29826 leaq 64 (%rdi), %rdi
29827@@ -68,7 +69,7 @@ ENTRY(copy_page)
29828 movq 0x8*4(%rsi), %r9
29829 movq 0x8*5(%rsi), %r10
29830 movq 0x8*6(%rsi), %r11
29831- movq 0x8*7(%rsi), %r12
29832+ movq 0x8*7(%rsi), %r13
29833
29834 movq %rax, 0x8*0(%rdi)
29835 movq %rbx, 0x8*1(%rdi)
29836@@ -77,7 +78,7 @@ ENTRY(copy_page)
29837 movq %r9, 0x8*4(%rdi)
29838 movq %r10, 0x8*5(%rdi)
29839 movq %r11, 0x8*6(%rdi)
29840- movq %r12, 0x8*7(%rdi)
29841+ movq %r13, 0x8*7(%rdi)
29842
29843 leaq 64(%rdi), %rdi
29844 leaq 64(%rsi), %rsi
29845@@ -85,10 +86,11 @@ ENTRY(copy_page)
29846
29847 movq (%rsp), %rbx
29848 CFI_RESTORE rbx
29849- movq 1*8(%rsp), %r12
29850- CFI_RESTORE r12
29851+ movq 1*8(%rsp), %r13
29852+ CFI_RESTORE r13
29853 addq $2*8, %rsp
29854 CFI_ADJUST_CFA_OFFSET -2*8
29855+ pax_force_retaddr
29856 ret
29857 .Lcopy_page_end:
29858 CFI_ENDPROC
29859@@ -99,7 +101,7 @@ ENDPROC(copy_page)
29860
29861 #include <asm/cpufeature.h>
29862
29863- .section .altinstr_replacement,"ax"
29864+ .section .altinstr_replacement,"a"
29865 1: .byte 0xeb /* jmp <disp8> */
29866 .byte (copy_page_rep - copy_page) - (2f - 1b) /* offset */
29867 2:
29868diff --git a/arch/x86/lib/copy_user_64.S b/arch/x86/lib/copy_user_64.S
29869index dee945d..a84067b 100644
29870--- a/arch/x86/lib/copy_user_64.S
29871+++ b/arch/x86/lib/copy_user_64.S
29872@@ -18,31 +18,7 @@
29873 #include <asm/alternative-asm.h>
29874 #include <asm/asm.h>
29875 #include <asm/smap.h>
29876-
29877-/*
29878- * By placing feature2 after feature1 in altinstructions section, we logically
29879- * implement:
29880- * If CPU has feature2, jmp to alt2 is used
29881- * else if CPU has feature1, jmp to alt1 is used
29882- * else jmp to orig is used.
29883- */
29884- .macro ALTERNATIVE_JUMP feature1,feature2,orig,alt1,alt2
29885-0:
29886- .byte 0xe9 /* 32bit jump */
29887- .long \orig-1f /* by default jump to orig */
29888-1:
29889- .section .altinstr_replacement,"ax"
29890-2: .byte 0xe9 /* near jump with 32bit immediate */
29891- .long \alt1-1b /* offset */ /* or alternatively to alt1 */
29892-3: .byte 0xe9 /* near jump with 32bit immediate */
29893- .long \alt2-1b /* offset */ /* or alternatively to alt2 */
29894- .previous
29895-
29896- .section .altinstructions,"a"
29897- altinstruction_entry 0b,2b,\feature1,5,5
29898- altinstruction_entry 0b,3b,\feature2,5,5
29899- .previous
29900- .endm
29901+#include <asm/pgtable.h>
29902
29903 .macro ALIGN_DESTINATION
29904 #ifdef FIX_ALIGNMENT
29905@@ -70,52 +46,6 @@
29906 #endif
29907 .endm
29908
29909-/* Standard copy_to_user with segment limit checking */
29910-ENTRY(_copy_to_user)
29911- CFI_STARTPROC
29912- GET_THREAD_INFO(%rax)
29913- movq %rdi,%rcx
29914- addq %rdx,%rcx
29915- jc bad_to_user
29916- cmpq TI_addr_limit(%rax),%rcx
29917- ja bad_to_user
29918- ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,X86_FEATURE_ERMS, \
29919- copy_user_generic_unrolled,copy_user_generic_string, \
29920- copy_user_enhanced_fast_string
29921- CFI_ENDPROC
29922-ENDPROC(_copy_to_user)
29923-
29924-/* Standard copy_from_user with segment limit checking */
29925-ENTRY(_copy_from_user)
29926- CFI_STARTPROC
29927- GET_THREAD_INFO(%rax)
29928- movq %rsi,%rcx
29929- addq %rdx,%rcx
29930- jc bad_from_user
29931- cmpq TI_addr_limit(%rax),%rcx
29932- ja bad_from_user
29933- ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,X86_FEATURE_ERMS, \
29934- copy_user_generic_unrolled,copy_user_generic_string, \
29935- copy_user_enhanced_fast_string
29936- CFI_ENDPROC
29937-ENDPROC(_copy_from_user)
29938-
29939- .section .fixup,"ax"
29940- /* must zero dest */
29941-ENTRY(bad_from_user)
29942-bad_from_user:
29943- CFI_STARTPROC
29944- movl %edx,%ecx
29945- xorl %eax,%eax
29946- rep
29947- stosb
29948-bad_to_user:
29949- movl %edx,%eax
29950- ret
29951- CFI_ENDPROC
29952-ENDPROC(bad_from_user)
29953- .previous
29954-
29955 /*
29956 * copy_user_generic_unrolled - memory copy with exception handling.
29957 * This version is for CPUs like P4 that don't have efficient micro
29958@@ -131,6 +61,7 @@ ENDPROC(bad_from_user)
29959 */
29960 ENTRY(copy_user_generic_unrolled)
29961 CFI_STARTPROC
29962+ ASM_PAX_OPEN_USERLAND
29963 ASM_STAC
29964 cmpl $8,%edx
29965 jb 20f /* less then 8 bytes, go to byte copy loop */
29966@@ -180,6 +111,8 @@ ENTRY(copy_user_generic_unrolled)
29967 jnz 21b
29968 23: xor %eax,%eax
29969 ASM_CLAC
29970+ ASM_PAX_CLOSE_USERLAND
29971+ pax_force_retaddr
29972 ret
29973
29974 .section .fixup,"ax"
29975@@ -235,6 +168,7 @@ ENDPROC(copy_user_generic_unrolled)
29976 */
29977 ENTRY(copy_user_generic_string)
29978 CFI_STARTPROC
29979+ ASM_PAX_OPEN_USERLAND
29980 ASM_STAC
29981 cmpl $8,%edx
29982 jb 2f /* less than 8 bytes, go to byte copy loop */
29983@@ -249,6 +183,8 @@ ENTRY(copy_user_generic_string)
29984 movsb
29985 xorl %eax,%eax
29986 ASM_CLAC
29987+ ASM_PAX_CLOSE_USERLAND
29988+ pax_force_retaddr
29989 ret
29990
29991 .section .fixup,"ax"
29992@@ -276,12 +212,15 @@ ENDPROC(copy_user_generic_string)
29993 */
29994 ENTRY(copy_user_enhanced_fast_string)
29995 CFI_STARTPROC
29996+ ASM_PAX_OPEN_USERLAND
29997 ASM_STAC
29998 movl %edx,%ecx
29999 1: rep
30000 movsb
30001 xorl %eax,%eax
30002 ASM_CLAC
30003+ ASM_PAX_CLOSE_USERLAND
30004+ pax_force_retaddr
30005 ret
30006
30007 .section .fixup,"ax"
30008diff --git a/arch/x86/lib/copy_user_nocache_64.S b/arch/x86/lib/copy_user_nocache_64.S
30009index 6a4f43c..c70fb52 100644
30010--- a/arch/x86/lib/copy_user_nocache_64.S
30011+++ b/arch/x86/lib/copy_user_nocache_64.S
30012@@ -8,6 +8,7 @@
30013
30014 #include <linux/linkage.h>
30015 #include <asm/dwarf2.h>
30016+#include <asm/alternative-asm.h>
30017
30018 #define FIX_ALIGNMENT 1
30019
30020@@ -16,6 +17,7 @@
30021 #include <asm/thread_info.h>
30022 #include <asm/asm.h>
30023 #include <asm/smap.h>
30024+#include <asm/pgtable.h>
30025
30026 .macro ALIGN_DESTINATION
30027 #ifdef FIX_ALIGNMENT
30028@@ -49,6 +51,16 @@
30029 */
30030 ENTRY(__copy_user_nocache)
30031 CFI_STARTPROC
30032+
30033+#ifdef CONFIG_PAX_MEMORY_UDEREF
30034+ mov pax_user_shadow_base,%rcx
30035+ cmp %rcx,%rsi
30036+ jae 1f
30037+ add %rcx,%rsi
30038+1:
30039+#endif
30040+
30041+ ASM_PAX_OPEN_USERLAND
30042 ASM_STAC
30043 cmpl $8,%edx
30044 jb 20f /* less then 8 bytes, go to byte copy loop */
30045@@ -98,7 +110,9 @@ ENTRY(__copy_user_nocache)
30046 jnz 21b
30047 23: xorl %eax,%eax
30048 ASM_CLAC
30049+ ASM_PAX_CLOSE_USERLAND
30050 sfence
30051+ pax_force_retaddr
30052 ret
30053
30054 .section .fixup,"ax"
30055diff --git a/arch/x86/lib/csum-copy_64.S b/arch/x86/lib/csum-copy_64.S
30056index 2419d5f..fe52d0e 100644
30057--- a/arch/x86/lib/csum-copy_64.S
30058+++ b/arch/x86/lib/csum-copy_64.S
30059@@ -9,6 +9,7 @@
30060 #include <asm/dwarf2.h>
30061 #include <asm/errno.h>
30062 #include <asm/asm.h>
30063+#include <asm/alternative-asm.h>
30064
30065 /*
30066 * Checksum copy with exception handling.
30067@@ -56,8 +57,8 @@ ENTRY(csum_partial_copy_generic)
30068 CFI_ADJUST_CFA_OFFSET 7*8
30069 movq %rbx, 2*8(%rsp)
30070 CFI_REL_OFFSET rbx, 2*8
30071- movq %r12, 3*8(%rsp)
30072- CFI_REL_OFFSET r12, 3*8
30073+ movq %r15, 3*8(%rsp)
30074+ CFI_REL_OFFSET r15, 3*8
30075 movq %r14, 4*8(%rsp)
30076 CFI_REL_OFFSET r14, 4*8
30077 movq %r13, 5*8(%rsp)
30078@@ -72,16 +73,16 @@ ENTRY(csum_partial_copy_generic)
30079 movl %edx, %ecx
30080
30081 xorl %r9d, %r9d
30082- movq %rcx, %r12
30083+ movq %rcx, %r15
30084
30085- shrq $6, %r12
30086+ shrq $6, %r15
30087 jz .Lhandle_tail /* < 64 */
30088
30089 clc
30090
30091 /* main loop. clear in 64 byte blocks */
30092 /* r9: zero, r8: temp2, rbx: temp1, rax: sum, rcx: saved length */
30093- /* r11: temp3, rdx: temp4, r12 loopcnt */
30094+ /* r11: temp3, rdx: temp4, r15 loopcnt */
30095 /* r10: temp5, rbp: temp6, r14 temp7, r13 temp8 */
30096 .p2align 4
30097 .Lloop:
30098@@ -115,7 +116,7 @@ ENTRY(csum_partial_copy_generic)
30099 adcq %r14, %rax
30100 adcq %r13, %rax
30101
30102- decl %r12d
30103+ decl %r15d
30104
30105 dest
30106 movq %rbx, (%rsi)
30107@@ -210,8 +211,8 @@ ENTRY(csum_partial_copy_generic)
30108 .Lende:
30109 movq 2*8(%rsp), %rbx
30110 CFI_RESTORE rbx
30111- movq 3*8(%rsp), %r12
30112- CFI_RESTORE r12
30113+ movq 3*8(%rsp), %r15
30114+ CFI_RESTORE r15
30115 movq 4*8(%rsp), %r14
30116 CFI_RESTORE r14
30117 movq 5*8(%rsp), %r13
30118@@ -220,6 +221,7 @@ ENTRY(csum_partial_copy_generic)
30119 CFI_RESTORE rbp
30120 addq $7*8, %rsp
30121 CFI_ADJUST_CFA_OFFSET -7*8
30122+ pax_force_retaddr
30123 ret
30124 CFI_RESTORE_STATE
30125
30126diff --git a/arch/x86/lib/csum-wrappers_64.c b/arch/x86/lib/csum-wrappers_64.c
30127index 1318f75..44c30fd 100644
30128--- a/arch/x86/lib/csum-wrappers_64.c
30129+++ b/arch/x86/lib/csum-wrappers_64.c
30130@@ -52,10 +52,12 @@ csum_partial_copy_from_user(const void __user *src, void *dst,
30131 len -= 2;
30132 }
30133 }
30134+ pax_open_userland();
30135 stac();
30136- isum = csum_partial_copy_generic((__force const void *)src,
30137+ isum = csum_partial_copy_generic((const void __force_kernel *)____m(src),
30138 dst, len, isum, errp, NULL);
30139 clac();
30140+ pax_close_userland();
30141 if (unlikely(*errp))
30142 goto out_err;
30143
30144@@ -109,10 +111,12 @@ csum_partial_copy_to_user(const void *src, void __user *dst,
30145 }
30146
30147 *errp = 0;
30148+ pax_open_userland();
30149 stac();
30150- ret = csum_partial_copy_generic(src, (void __force *)dst,
30151+ ret = csum_partial_copy_generic(src, (void __force_kernel *)____m(dst),
30152 len, isum, NULL, errp);
30153 clac();
30154+ pax_close_userland();
30155 return ret;
30156 }
30157 EXPORT_SYMBOL(csum_partial_copy_to_user);
30158diff --git a/arch/x86/lib/getuser.S b/arch/x86/lib/getuser.S
30159index a451235..a74bfa3 100644
30160--- a/arch/x86/lib/getuser.S
30161+++ b/arch/x86/lib/getuser.S
30162@@ -33,17 +33,40 @@
30163 #include <asm/thread_info.h>
30164 #include <asm/asm.h>
30165 #include <asm/smap.h>
30166+#include <asm/segment.h>
30167+#include <asm/pgtable.h>
30168+#include <asm/alternative-asm.h>
30169+
30170+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
30171+#define __copyuser_seg gs;
30172+#else
30173+#define __copyuser_seg
30174+#endif
30175
30176 .text
30177 ENTRY(__get_user_1)
30178 CFI_STARTPROC
30179+
30180+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
30181 GET_THREAD_INFO(%_ASM_DX)
30182 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
30183 jae bad_get_user
30184+
30185+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
30186+ mov pax_user_shadow_base,%_ASM_DX
30187+ cmp %_ASM_DX,%_ASM_AX
30188+ jae 1234f
30189+ add %_ASM_DX,%_ASM_AX
30190+1234:
30191+#endif
30192+
30193+#endif
30194+
30195 ASM_STAC
30196-1: movzbl (%_ASM_AX),%edx
30197+1: __copyuser_seg movzbl (%_ASM_AX),%edx
30198 xor %eax,%eax
30199 ASM_CLAC
30200+ pax_force_retaddr
30201 ret
30202 CFI_ENDPROC
30203 ENDPROC(__get_user_1)
30204@@ -51,14 +74,28 @@ ENDPROC(__get_user_1)
30205 ENTRY(__get_user_2)
30206 CFI_STARTPROC
30207 add $1,%_ASM_AX
30208+
30209+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
30210 jc bad_get_user
30211 GET_THREAD_INFO(%_ASM_DX)
30212 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
30213 jae bad_get_user
30214+
30215+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
30216+ mov pax_user_shadow_base,%_ASM_DX
30217+ cmp %_ASM_DX,%_ASM_AX
30218+ jae 1234f
30219+ add %_ASM_DX,%_ASM_AX
30220+1234:
30221+#endif
30222+
30223+#endif
30224+
30225 ASM_STAC
30226-2: movzwl -1(%_ASM_AX),%edx
30227+2: __copyuser_seg movzwl -1(%_ASM_AX),%edx
30228 xor %eax,%eax
30229 ASM_CLAC
30230+ pax_force_retaddr
30231 ret
30232 CFI_ENDPROC
30233 ENDPROC(__get_user_2)
30234@@ -66,14 +103,28 @@ ENDPROC(__get_user_2)
30235 ENTRY(__get_user_4)
30236 CFI_STARTPROC
30237 add $3,%_ASM_AX
30238+
30239+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
30240 jc bad_get_user
30241 GET_THREAD_INFO(%_ASM_DX)
30242 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
30243 jae bad_get_user
30244+
30245+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
30246+ mov pax_user_shadow_base,%_ASM_DX
30247+ cmp %_ASM_DX,%_ASM_AX
30248+ jae 1234f
30249+ add %_ASM_DX,%_ASM_AX
30250+1234:
30251+#endif
30252+
30253+#endif
30254+
30255 ASM_STAC
30256-3: movl -3(%_ASM_AX),%edx
30257+3: __copyuser_seg movl -3(%_ASM_AX),%edx
30258 xor %eax,%eax
30259 ASM_CLAC
30260+ pax_force_retaddr
30261 ret
30262 CFI_ENDPROC
30263 ENDPROC(__get_user_4)
30264@@ -86,10 +137,20 @@ ENTRY(__get_user_8)
30265 GET_THREAD_INFO(%_ASM_DX)
30266 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
30267 jae bad_get_user
30268+
30269+#ifdef CONFIG_PAX_MEMORY_UDEREF
30270+ mov pax_user_shadow_base,%_ASM_DX
30271+ cmp %_ASM_DX,%_ASM_AX
30272+ jae 1234f
30273+ add %_ASM_DX,%_ASM_AX
30274+1234:
30275+#endif
30276+
30277 ASM_STAC
30278 4: movq -7(%_ASM_AX),%rdx
30279 xor %eax,%eax
30280 ASM_CLAC
30281+ pax_force_retaddr
30282 ret
30283 #else
30284 add $7,%_ASM_AX
30285@@ -98,10 +159,11 @@ ENTRY(__get_user_8)
30286 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
30287 jae bad_get_user_8
30288 ASM_STAC
30289-4: movl -7(%_ASM_AX),%edx
30290-5: movl -3(%_ASM_AX),%ecx
30291+4: __copyuser_seg movl -7(%_ASM_AX),%edx
30292+5: __copyuser_seg movl -3(%_ASM_AX),%ecx
30293 xor %eax,%eax
30294 ASM_CLAC
30295+ pax_force_retaddr
30296 ret
30297 #endif
30298 CFI_ENDPROC
30299@@ -113,6 +175,7 @@ bad_get_user:
30300 xor %edx,%edx
30301 mov $(-EFAULT),%_ASM_AX
30302 ASM_CLAC
30303+ pax_force_retaddr
30304 ret
30305 CFI_ENDPROC
30306 END(bad_get_user)
30307@@ -124,6 +187,7 @@ bad_get_user_8:
30308 xor %ecx,%ecx
30309 mov $(-EFAULT),%_ASM_AX
30310 ASM_CLAC
30311+ pax_force_retaddr
30312 ret
30313 CFI_ENDPROC
30314 END(bad_get_user_8)
30315diff --git a/arch/x86/lib/insn.c b/arch/x86/lib/insn.c
30316index 85994f5..9929d7f 100644
30317--- a/arch/x86/lib/insn.c
30318+++ b/arch/x86/lib/insn.c
30319@@ -20,8 +20,10 @@
30320
30321 #ifdef __KERNEL__
30322 #include <linux/string.h>
30323+#include <asm/pgtable_types.h>
30324 #else
30325 #include <string.h>
30326+#define ktla_ktva(addr) addr
30327 #endif
30328 #include <asm/inat.h>
30329 #include <asm/insn.h>
30330@@ -60,9 +62,9 @@ void insn_init(struct insn *insn, const void *kaddr, int buf_len, int x86_64)
30331 buf_len = MAX_INSN_SIZE;
30332
30333 memset(insn, 0, sizeof(*insn));
30334- insn->kaddr = kaddr;
30335- insn->end_kaddr = kaddr + buf_len;
30336- insn->next_byte = kaddr;
30337+ insn->kaddr = ktla_ktva(kaddr);
30338+ insn->end_kaddr = insn->kaddr + buf_len;
30339+ insn->next_byte = insn->kaddr;
30340 insn->x86_64 = x86_64 ? 1 : 0;
30341 insn->opnd_bytes = 4;
30342 if (x86_64)
30343diff --git a/arch/x86/lib/iomap_copy_64.S b/arch/x86/lib/iomap_copy_64.S
30344index 05a95e7..326f2fa 100644
30345--- a/arch/x86/lib/iomap_copy_64.S
30346+++ b/arch/x86/lib/iomap_copy_64.S
30347@@ -17,6 +17,7 @@
30348
30349 #include <linux/linkage.h>
30350 #include <asm/dwarf2.h>
30351+#include <asm/alternative-asm.h>
30352
30353 /*
30354 * override generic version in lib/iomap_copy.c
30355@@ -25,6 +26,7 @@ ENTRY(__iowrite32_copy)
30356 CFI_STARTPROC
30357 movl %edx,%ecx
30358 rep movsd
30359+ pax_force_retaddr
30360 ret
30361 CFI_ENDPROC
30362 ENDPROC(__iowrite32_copy)
30363diff --git a/arch/x86/lib/memcpy_64.S b/arch/x86/lib/memcpy_64.S
30364index 89b53c9..97357ca 100644
30365--- a/arch/x86/lib/memcpy_64.S
30366+++ b/arch/x86/lib/memcpy_64.S
30367@@ -24,7 +24,7 @@
30368 * This gets patched over the unrolled variant (below) via the
30369 * alternative instructions framework:
30370 */
30371- .section .altinstr_replacement, "ax", @progbits
30372+ .section .altinstr_replacement, "a", @progbits
30373 .Lmemcpy_c:
30374 movq %rdi, %rax
30375 movq %rdx, %rcx
30376@@ -33,6 +33,7 @@
30377 rep movsq
30378 movl %edx, %ecx
30379 rep movsb
30380+ pax_force_retaddr
30381 ret
30382 .Lmemcpy_e:
30383 .previous
30384@@ -44,11 +45,12 @@
30385 * This gets patched over the unrolled variant (below) via the
30386 * alternative instructions framework:
30387 */
30388- .section .altinstr_replacement, "ax", @progbits
30389+ .section .altinstr_replacement, "a", @progbits
30390 .Lmemcpy_c_e:
30391 movq %rdi, %rax
30392 movq %rdx, %rcx
30393 rep movsb
30394+ pax_force_retaddr
30395 ret
30396 .Lmemcpy_e_e:
30397 .previous
30398@@ -138,6 +140,7 @@ ENTRY(memcpy)
30399 movq %r9, 1*8(%rdi)
30400 movq %r10, -2*8(%rdi, %rdx)
30401 movq %r11, -1*8(%rdi, %rdx)
30402+ pax_force_retaddr
30403 retq
30404 .p2align 4
30405 .Lless_16bytes:
30406@@ -150,6 +153,7 @@ ENTRY(memcpy)
30407 movq -1*8(%rsi, %rdx), %r9
30408 movq %r8, 0*8(%rdi)
30409 movq %r9, -1*8(%rdi, %rdx)
30410+ pax_force_retaddr
30411 retq
30412 .p2align 4
30413 .Lless_8bytes:
30414@@ -163,6 +167,7 @@ ENTRY(memcpy)
30415 movl -4(%rsi, %rdx), %r8d
30416 movl %ecx, (%rdi)
30417 movl %r8d, -4(%rdi, %rdx)
30418+ pax_force_retaddr
30419 retq
30420 .p2align 4
30421 .Lless_3bytes:
30422@@ -181,6 +186,7 @@ ENTRY(memcpy)
30423 movb %cl, (%rdi)
30424
30425 .Lend:
30426+ pax_force_retaddr
30427 retq
30428 CFI_ENDPROC
30429 ENDPROC(memcpy)
30430diff --git a/arch/x86/lib/memmove_64.S b/arch/x86/lib/memmove_64.S
30431index 9c4b530..830b77a 100644
30432--- a/arch/x86/lib/memmove_64.S
30433+++ b/arch/x86/lib/memmove_64.S
30434@@ -205,14 +205,16 @@ ENTRY(__memmove)
30435 movb (%rsi), %r11b
30436 movb %r11b, (%rdi)
30437 13:
30438+ pax_force_retaddr
30439 retq
30440 CFI_ENDPROC
30441
30442- .section .altinstr_replacement,"ax"
30443+ .section .altinstr_replacement,"a"
30444 .Lmemmove_begin_forward_efs:
30445 /* Forward moving data. */
30446 movq %rdx, %rcx
30447 rep movsb
30448+ pax_force_retaddr
30449 retq
30450 .Lmemmove_end_forward_efs:
30451 .previous
30452diff --git a/arch/x86/lib/memset_64.S b/arch/x86/lib/memset_64.S
30453index 6f44935..fbf5f6d 100644
30454--- a/arch/x86/lib/memset_64.S
30455+++ b/arch/x86/lib/memset_64.S
30456@@ -16,7 +16,7 @@
30457 *
30458 * rax original destination
30459 */
30460- .section .altinstr_replacement, "ax", @progbits
30461+ .section .altinstr_replacement, "a", @progbits
30462 .Lmemset_c:
30463 movq %rdi,%r9
30464 movq %rdx,%rcx
30465@@ -30,6 +30,7 @@
30466 movl %edx,%ecx
30467 rep stosb
30468 movq %r9,%rax
30469+ pax_force_retaddr
30470 ret
30471 .Lmemset_e:
30472 .previous
30473@@ -45,13 +46,14 @@
30474 *
30475 * rax original destination
30476 */
30477- .section .altinstr_replacement, "ax", @progbits
30478+ .section .altinstr_replacement, "a", @progbits
30479 .Lmemset_c_e:
30480 movq %rdi,%r9
30481 movb %sil,%al
30482 movq %rdx,%rcx
30483 rep stosb
30484 movq %r9,%rax
30485+ pax_force_retaddr
30486 ret
30487 .Lmemset_e_e:
30488 .previous
30489@@ -120,6 +122,7 @@ ENTRY(__memset)
30490
30491 .Lende:
30492 movq %r10,%rax
30493+ pax_force_retaddr
30494 ret
30495
30496 CFI_RESTORE_STATE
30497diff --git a/arch/x86/lib/mmx_32.c b/arch/x86/lib/mmx_32.c
30498index c9f2d9b..e7fd2c0 100644
30499--- a/arch/x86/lib/mmx_32.c
30500+++ b/arch/x86/lib/mmx_32.c
30501@@ -29,6 +29,7 @@ void *_mmx_memcpy(void *to, const void *from, size_t len)
30502 {
30503 void *p;
30504 int i;
30505+ unsigned long cr0;
30506
30507 if (unlikely(in_interrupt()))
30508 return __memcpy(to, from, len);
30509@@ -39,44 +40,72 @@ void *_mmx_memcpy(void *to, const void *from, size_t len)
30510 kernel_fpu_begin();
30511
30512 __asm__ __volatile__ (
30513- "1: prefetch (%0)\n" /* This set is 28 bytes */
30514- " prefetch 64(%0)\n"
30515- " prefetch 128(%0)\n"
30516- " prefetch 192(%0)\n"
30517- " prefetch 256(%0)\n"
30518+ "1: prefetch (%1)\n" /* This set is 28 bytes */
30519+ " prefetch 64(%1)\n"
30520+ " prefetch 128(%1)\n"
30521+ " prefetch 192(%1)\n"
30522+ " prefetch 256(%1)\n"
30523 "2: \n"
30524 ".section .fixup, \"ax\"\n"
30525- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
30526+ "3: \n"
30527+
30528+#ifdef CONFIG_PAX_KERNEXEC
30529+ " movl %%cr0, %0\n"
30530+ " movl %0, %%eax\n"
30531+ " andl $0xFFFEFFFF, %%eax\n"
30532+ " movl %%eax, %%cr0\n"
30533+#endif
30534+
30535+ " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
30536+
30537+#ifdef CONFIG_PAX_KERNEXEC
30538+ " movl %0, %%cr0\n"
30539+#endif
30540+
30541 " jmp 2b\n"
30542 ".previous\n"
30543 _ASM_EXTABLE(1b, 3b)
30544- : : "r" (from));
30545+ : "=&r" (cr0) : "r" (from) : "ax");
30546
30547 for ( ; i > 5; i--) {
30548 __asm__ __volatile__ (
30549- "1: prefetch 320(%0)\n"
30550- "2: movq (%0), %%mm0\n"
30551- " movq 8(%0), %%mm1\n"
30552- " movq 16(%0), %%mm2\n"
30553- " movq 24(%0), %%mm3\n"
30554- " movq %%mm0, (%1)\n"
30555- " movq %%mm1, 8(%1)\n"
30556- " movq %%mm2, 16(%1)\n"
30557- " movq %%mm3, 24(%1)\n"
30558- " movq 32(%0), %%mm0\n"
30559- " movq 40(%0), %%mm1\n"
30560- " movq 48(%0), %%mm2\n"
30561- " movq 56(%0), %%mm3\n"
30562- " movq %%mm0, 32(%1)\n"
30563- " movq %%mm1, 40(%1)\n"
30564- " movq %%mm2, 48(%1)\n"
30565- " movq %%mm3, 56(%1)\n"
30566+ "1: prefetch 320(%1)\n"
30567+ "2: movq (%1), %%mm0\n"
30568+ " movq 8(%1), %%mm1\n"
30569+ " movq 16(%1), %%mm2\n"
30570+ " movq 24(%1), %%mm3\n"
30571+ " movq %%mm0, (%2)\n"
30572+ " movq %%mm1, 8(%2)\n"
30573+ " movq %%mm2, 16(%2)\n"
30574+ " movq %%mm3, 24(%2)\n"
30575+ " movq 32(%1), %%mm0\n"
30576+ " movq 40(%1), %%mm1\n"
30577+ " movq 48(%1), %%mm2\n"
30578+ " movq 56(%1), %%mm3\n"
30579+ " movq %%mm0, 32(%2)\n"
30580+ " movq %%mm1, 40(%2)\n"
30581+ " movq %%mm2, 48(%2)\n"
30582+ " movq %%mm3, 56(%2)\n"
30583 ".section .fixup, \"ax\"\n"
30584- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
30585+ "3:\n"
30586+
30587+#ifdef CONFIG_PAX_KERNEXEC
30588+ " movl %%cr0, %0\n"
30589+ " movl %0, %%eax\n"
30590+ " andl $0xFFFEFFFF, %%eax\n"
30591+ " movl %%eax, %%cr0\n"
30592+#endif
30593+
30594+ " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
30595+
30596+#ifdef CONFIG_PAX_KERNEXEC
30597+ " movl %0, %%cr0\n"
30598+#endif
30599+
30600 " jmp 2b\n"
30601 ".previous\n"
30602 _ASM_EXTABLE(1b, 3b)
30603- : : "r" (from), "r" (to) : "memory");
30604+ : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
30605
30606 from += 64;
30607 to += 64;
30608@@ -158,6 +187,7 @@ static void fast_clear_page(void *page)
30609 static void fast_copy_page(void *to, void *from)
30610 {
30611 int i;
30612+ unsigned long cr0;
30613
30614 kernel_fpu_begin();
30615
30616@@ -166,42 +196,70 @@ static void fast_copy_page(void *to, void *from)
30617 * but that is for later. -AV
30618 */
30619 __asm__ __volatile__(
30620- "1: prefetch (%0)\n"
30621- " prefetch 64(%0)\n"
30622- " prefetch 128(%0)\n"
30623- " prefetch 192(%0)\n"
30624- " prefetch 256(%0)\n"
30625+ "1: prefetch (%1)\n"
30626+ " prefetch 64(%1)\n"
30627+ " prefetch 128(%1)\n"
30628+ " prefetch 192(%1)\n"
30629+ " prefetch 256(%1)\n"
30630 "2: \n"
30631 ".section .fixup, \"ax\"\n"
30632- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
30633+ "3: \n"
30634+
30635+#ifdef CONFIG_PAX_KERNEXEC
30636+ " movl %%cr0, %0\n"
30637+ " movl %0, %%eax\n"
30638+ " andl $0xFFFEFFFF, %%eax\n"
30639+ " movl %%eax, %%cr0\n"
30640+#endif
30641+
30642+ " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
30643+
30644+#ifdef CONFIG_PAX_KERNEXEC
30645+ " movl %0, %%cr0\n"
30646+#endif
30647+
30648 " jmp 2b\n"
30649 ".previous\n"
30650- _ASM_EXTABLE(1b, 3b) : : "r" (from));
30651+ _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
30652
30653 for (i = 0; i < (4096-320)/64; i++) {
30654 __asm__ __volatile__ (
30655- "1: prefetch 320(%0)\n"
30656- "2: movq (%0), %%mm0\n"
30657- " movntq %%mm0, (%1)\n"
30658- " movq 8(%0), %%mm1\n"
30659- " movntq %%mm1, 8(%1)\n"
30660- " movq 16(%0), %%mm2\n"
30661- " movntq %%mm2, 16(%1)\n"
30662- " movq 24(%0), %%mm3\n"
30663- " movntq %%mm3, 24(%1)\n"
30664- " movq 32(%0), %%mm4\n"
30665- " movntq %%mm4, 32(%1)\n"
30666- " movq 40(%0), %%mm5\n"
30667- " movntq %%mm5, 40(%1)\n"
30668- " movq 48(%0), %%mm6\n"
30669- " movntq %%mm6, 48(%1)\n"
30670- " movq 56(%0), %%mm7\n"
30671- " movntq %%mm7, 56(%1)\n"
30672+ "1: prefetch 320(%1)\n"
30673+ "2: movq (%1), %%mm0\n"
30674+ " movntq %%mm0, (%2)\n"
30675+ " movq 8(%1), %%mm1\n"
30676+ " movntq %%mm1, 8(%2)\n"
30677+ " movq 16(%1), %%mm2\n"
30678+ " movntq %%mm2, 16(%2)\n"
30679+ " movq 24(%1), %%mm3\n"
30680+ " movntq %%mm3, 24(%2)\n"
30681+ " movq 32(%1), %%mm4\n"
30682+ " movntq %%mm4, 32(%2)\n"
30683+ " movq 40(%1), %%mm5\n"
30684+ " movntq %%mm5, 40(%2)\n"
30685+ " movq 48(%1), %%mm6\n"
30686+ " movntq %%mm6, 48(%2)\n"
30687+ " movq 56(%1), %%mm7\n"
30688+ " movntq %%mm7, 56(%2)\n"
30689 ".section .fixup, \"ax\"\n"
30690- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
30691+ "3:\n"
30692+
30693+#ifdef CONFIG_PAX_KERNEXEC
30694+ " movl %%cr0, %0\n"
30695+ " movl %0, %%eax\n"
30696+ " andl $0xFFFEFFFF, %%eax\n"
30697+ " movl %%eax, %%cr0\n"
30698+#endif
30699+
30700+ " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
30701+
30702+#ifdef CONFIG_PAX_KERNEXEC
30703+ " movl %0, %%cr0\n"
30704+#endif
30705+
30706 " jmp 2b\n"
30707 ".previous\n"
30708- _ASM_EXTABLE(1b, 3b) : : "r" (from), "r" (to) : "memory");
30709+ _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
30710
30711 from += 64;
30712 to += 64;
30713@@ -280,47 +338,76 @@ static void fast_clear_page(void *page)
30714 static void fast_copy_page(void *to, void *from)
30715 {
30716 int i;
30717+ unsigned long cr0;
30718
30719 kernel_fpu_begin();
30720
30721 __asm__ __volatile__ (
30722- "1: prefetch (%0)\n"
30723- " prefetch 64(%0)\n"
30724- " prefetch 128(%0)\n"
30725- " prefetch 192(%0)\n"
30726- " prefetch 256(%0)\n"
30727+ "1: prefetch (%1)\n"
30728+ " prefetch 64(%1)\n"
30729+ " prefetch 128(%1)\n"
30730+ " prefetch 192(%1)\n"
30731+ " prefetch 256(%1)\n"
30732 "2: \n"
30733 ".section .fixup, \"ax\"\n"
30734- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
30735+ "3: \n"
30736+
30737+#ifdef CONFIG_PAX_KERNEXEC
30738+ " movl %%cr0, %0\n"
30739+ " movl %0, %%eax\n"
30740+ " andl $0xFFFEFFFF, %%eax\n"
30741+ " movl %%eax, %%cr0\n"
30742+#endif
30743+
30744+ " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
30745+
30746+#ifdef CONFIG_PAX_KERNEXEC
30747+ " movl %0, %%cr0\n"
30748+#endif
30749+
30750 " jmp 2b\n"
30751 ".previous\n"
30752- _ASM_EXTABLE(1b, 3b) : : "r" (from));
30753+ _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
30754
30755 for (i = 0; i < 4096/64; i++) {
30756 __asm__ __volatile__ (
30757- "1: prefetch 320(%0)\n"
30758- "2: movq (%0), %%mm0\n"
30759- " movq 8(%0), %%mm1\n"
30760- " movq 16(%0), %%mm2\n"
30761- " movq 24(%0), %%mm3\n"
30762- " movq %%mm0, (%1)\n"
30763- " movq %%mm1, 8(%1)\n"
30764- " movq %%mm2, 16(%1)\n"
30765- " movq %%mm3, 24(%1)\n"
30766- " movq 32(%0), %%mm0\n"
30767- " movq 40(%0), %%mm1\n"
30768- " movq 48(%0), %%mm2\n"
30769- " movq 56(%0), %%mm3\n"
30770- " movq %%mm0, 32(%1)\n"
30771- " movq %%mm1, 40(%1)\n"
30772- " movq %%mm2, 48(%1)\n"
30773- " movq %%mm3, 56(%1)\n"
30774+ "1: prefetch 320(%1)\n"
30775+ "2: movq (%1), %%mm0\n"
30776+ " movq 8(%1), %%mm1\n"
30777+ " movq 16(%1), %%mm2\n"
30778+ " movq 24(%1), %%mm3\n"
30779+ " movq %%mm0, (%2)\n"
30780+ " movq %%mm1, 8(%2)\n"
30781+ " movq %%mm2, 16(%2)\n"
30782+ " movq %%mm3, 24(%2)\n"
30783+ " movq 32(%1), %%mm0\n"
30784+ " movq 40(%1), %%mm1\n"
30785+ " movq 48(%1), %%mm2\n"
30786+ " movq 56(%1), %%mm3\n"
30787+ " movq %%mm0, 32(%2)\n"
30788+ " movq %%mm1, 40(%2)\n"
30789+ " movq %%mm2, 48(%2)\n"
30790+ " movq %%mm3, 56(%2)\n"
30791 ".section .fixup, \"ax\"\n"
30792- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
30793+ "3:\n"
30794+
30795+#ifdef CONFIG_PAX_KERNEXEC
30796+ " movl %%cr0, %0\n"
30797+ " movl %0, %%eax\n"
30798+ " andl $0xFFFEFFFF, %%eax\n"
30799+ " movl %%eax, %%cr0\n"
30800+#endif
30801+
30802+ " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
30803+
30804+#ifdef CONFIG_PAX_KERNEXEC
30805+ " movl %0, %%cr0\n"
30806+#endif
30807+
30808 " jmp 2b\n"
30809 ".previous\n"
30810 _ASM_EXTABLE(1b, 3b)
30811- : : "r" (from), "r" (to) : "memory");
30812+ : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
30813
30814 from += 64;
30815 to += 64;
30816diff --git a/arch/x86/lib/msr-reg.S b/arch/x86/lib/msr-reg.S
30817index f6d13ee..d789440 100644
30818--- a/arch/x86/lib/msr-reg.S
30819+++ b/arch/x86/lib/msr-reg.S
30820@@ -3,6 +3,7 @@
30821 #include <asm/dwarf2.h>
30822 #include <asm/asm.h>
30823 #include <asm/msr.h>
30824+#include <asm/alternative-asm.h>
30825
30826 #ifdef CONFIG_X86_64
30827 /*
30828@@ -37,6 +38,7 @@ ENTRY(\op\()_safe_regs)
30829 movl %edi, 28(%r10)
30830 popq_cfi %rbp
30831 popq_cfi %rbx
30832+ pax_force_retaddr
30833 ret
30834 3:
30835 CFI_RESTORE_STATE
30836diff --git a/arch/x86/lib/putuser.S b/arch/x86/lib/putuser.S
30837index fc6ba17..14ad9a5 100644
30838--- a/arch/x86/lib/putuser.S
30839+++ b/arch/x86/lib/putuser.S
30840@@ -16,7 +16,9 @@
30841 #include <asm/errno.h>
30842 #include <asm/asm.h>
30843 #include <asm/smap.h>
30844-
30845+#include <asm/segment.h>
30846+#include <asm/pgtable.h>
30847+#include <asm/alternative-asm.h>
30848
30849 /*
30850 * __put_user_X
30851@@ -30,57 +32,125 @@
30852 * as they get called from within inline assembly.
30853 */
30854
30855-#define ENTER CFI_STARTPROC ; \
30856- GET_THREAD_INFO(%_ASM_BX)
30857-#define EXIT ASM_CLAC ; \
30858- ret ; \
30859+#define ENTER CFI_STARTPROC
30860+#define EXIT ASM_CLAC ; \
30861+ pax_force_retaddr ; \
30862+ ret ; \
30863 CFI_ENDPROC
30864
30865+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
30866+#define _DEST %_ASM_CX,%_ASM_BX
30867+#else
30868+#define _DEST %_ASM_CX
30869+#endif
30870+
30871+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
30872+#define __copyuser_seg gs;
30873+#else
30874+#define __copyuser_seg
30875+#endif
30876+
30877 .text
30878 ENTRY(__put_user_1)
30879 ENTER
30880+
30881+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
30882+ GET_THREAD_INFO(%_ASM_BX)
30883 cmp TI_addr_limit(%_ASM_BX),%_ASM_CX
30884 jae bad_put_user
30885+
30886+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
30887+ mov pax_user_shadow_base,%_ASM_BX
30888+ cmp %_ASM_BX,%_ASM_CX
30889+ jb 1234f
30890+ xor %ebx,%ebx
30891+1234:
30892+#endif
30893+
30894+#endif
30895+
30896 ASM_STAC
30897-1: movb %al,(%_ASM_CX)
30898+1: __copyuser_seg movb %al,(_DEST)
30899 xor %eax,%eax
30900 EXIT
30901 ENDPROC(__put_user_1)
30902
30903 ENTRY(__put_user_2)
30904 ENTER
30905+
30906+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
30907+ GET_THREAD_INFO(%_ASM_BX)
30908 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
30909 sub $1,%_ASM_BX
30910 cmp %_ASM_BX,%_ASM_CX
30911 jae bad_put_user
30912+
30913+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
30914+ mov pax_user_shadow_base,%_ASM_BX
30915+ cmp %_ASM_BX,%_ASM_CX
30916+ jb 1234f
30917+ xor %ebx,%ebx
30918+1234:
30919+#endif
30920+
30921+#endif
30922+
30923 ASM_STAC
30924-2: movw %ax,(%_ASM_CX)
30925+2: __copyuser_seg movw %ax,(_DEST)
30926 xor %eax,%eax
30927 EXIT
30928 ENDPROC(__put_user_2)
30929
30930 ENTRY(__put_user_4)
30931 ENTER
30932+
30933+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
30934+ GET_THREAD_INFO(%_ASM_BX)
30935 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
30936 sub $3,%_ASM_BX
30937 cmp %_ASM_BX,%_ASM_CX
30938 jae bad_put_user
30939+
30940+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
30941+ mov pax_user_shadow_base,%_ASM_BX
30942+ cmp %_ASM_BX,%_ASM_CX
30943+ jb 1234f
30944+ xor %ebx,%ebx
30945+1234:
30946+#endif
30947+
30948+#endif
30949+
30950 ASM_STAC
30951-3: movl %eax,(%_ASM_CX)
30952+3: __copyuser_seg movl %eax,(_DEST)
30953 xor %eax,%eax
30954 EXIT
30955 ENDPROC(__put_user_4)
30956
30957 ENTRY(__put_user_8)
30958 ENTER
30959+
30960+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
30961+ GET_THREAD_INFO(%_ASM_BX)
30962 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
30963 sub $7,%_ASM_BX
30964 cmp %_ASM_BX,%_ASM_CX
30965 jae bad_put_user
30966+
30967+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
30968+ mov pax_user_shadow_base,%_ASM_BX
30969+ cmp %_ASM_BX,%_ASM_CX
30970+ jb 1234f
30971+ xor %ebx,%ebx
30972+1234:
30973+#endif
30974+
30975+#endif
30976+
30977 ASM_STAC
30978-4: mov %_ASM_AX,(%_ASM_CX)
30979+4: __copyuser_seg mov %_ASM_AX,(_DEST)
30980 #ifdef CONFIG_X86_32
30981-5: movl %edx,4(%_ASM_CX)
30982+5: __copyuser_seg movl %edx,4(_DEST)
30983 #endif
30984 xor %eax,%eax
30985 EXIT
30986diff --git a/arch/x86/lib/rwsem.S b/arch/x86/lib/rwsem.S
30987index 5dff5f0..cadebf4 100644
30988--- a/arch/x86/lib/rwsem.S
30989+++ b/arch/x86/lib/rwsem.S
30990@@ -94,6 +94,7 @@ ENTRY(call_rwsem_down_read_failed)
30991 __ASM_SIZE(pop,_cfi) %__ASM_REG(dx)
30992 CFI_RESTORE __ASM_REG(dx)
30993 restore_common_regs
30994+ pax_force_retaddr
30995 ret
30996 CFI_ENDPROC
30997 ENDPROC(call_rwsem_down_read_failed)
30998@@ -104,6 +105,7 @@ ENTRY(call_rwsem_down_write_failed)
30999 movq %rax,%rdi
31000 call rwsem_down_write_failed
31001 restore_common_regs
31002+ pax_force_retaddr
31003 ret
31004 CFI_ENDPROC
31005 ENDPROC(call_rwsem_down_write_failed)
31006@@ -117,7 +119,8 @@ ENTRY(call_rwsem_wake)
31007 movq %rax,%rdi
31008 call rwsem_wake
31009 restore_common_regs
31010-1: ret
31011+1: pax_force_retaddr
31012+ ret
31013 CFI_ENDPROC
31014 ENDPROC(call_rwsem_wake)
31015
31016@@ -131,6 +134,7 @@ ENTRY(call_rwsem_downgrade_wake)
31017 __ASM_SIZE(pop,_cfi) %__ASM_REG(dx)
31018 CFI_RESTORE __ASM_REG(dx)
31019 restore_common_regs
31020+ pax_force_retaddr
31021 ret
31022 CFI_ENDPROC
31023 ENDPROC(call_rwsem_downgrade_wake)
31024diff --git a/arch/x86/lib/thunk_64.S b/arch/x86/lib/thunk_64.S
31025index b30b5eb..2b57052 100644
31026--- a/arch/x86/lib/thunk_64.S
31027+++ b/arch/x86/lib/thunk_64.S
31028@@ -9,6 +9,7 @@
31029 #include <asm/dwarf2.h>
31030 #include <asm/calling.h>
31031 #include <asm/asm.h>
31032+#include <asm/alternative-asm.h>
31033
31034 /* rdi: arg1 ... normal C conventions. rax is saved/restored. */
31035 .macro THUNK name, func, put_ret_addr_in_rdi=0
31036@@ -16,11 +17,11 @@
31037 \name:
31038 CFI_STARTPROC
31039
31040- /* this one pushes 9 elems, the next one would be %rIP */
31041- SAVE_ARGS
31042+ /* this one pushes 15+1 elems, the next one would be %rIP */
31043+ SAVE_ARGS 8
31044
31045 .if \put_ret_addr_in_rdi
31046- movq_cfi_restore 9*8, rdi
31047+ movq_cfi_restore RIP, rdi
31048 .endif
31049
31050 call \func
31051@@ -47,9 +48,10 @@
31052
31053 /* SAVE_ARGS below is used only for the .cfi directives it contains. */
31054 CFI_STARTPROC
31055- SAVE_ARGS
31056+ SAVE_ARGS 8
31057 restore:
31058- RESTORE_ARGS
31059+ RESTORE_ARGS 1,8
31060+ pax_force_retaddr
31061 ret
31062 CFI_ENDPROC
31063 _ASM_NOKPROBE(restore)
31064diff --git a/arch/x86/lib/usercopy_32.c b/arch/x86/lib/usercopy_32.c
31065index e2f5e21..4b22130 100644
31066--- a/arch/x86/lib/usercopy_32.c
31067+++ b/arch/x86/lib/usercopy_32.c
31068@@ -42,11 +42,13 @@ do { \
31069 int __d0; \
31070 might_fault(); \
31071 __asm__ __volatile__( \
31072+ __COPYUSER_SET_ES \
31073 ASM_STAC "\n" \
31074 "0: rep; stosl\n" \
31075 " movl %2,%0\n" \
31076 "1: rep; stosb\n" \
31077 "2: " ASM_CLAC "\n" \
31078+ __COPYUSER_RESTORE_ES \
31079 ".section .fixup,\"ax\"\n" \
31080 "3: lea 0(%2,%0,4),%0\n" \
31081 " jmp 2b\n" \
31082@@ -98,7 +100,7 @@ EXPORT_SYMBOL(__clear_user);
31083
31084 #ifdef CONFIG_X86_INTEL_USERCOPY
31085 static unsigned long
31086-__copy_user_intel(void __user *to, const void *from, unsigned long size)
31087+__generic_copy_to_user_intel(void __user *to, const void *from, unsigned long size)
31088 {
31089 int d0, d1;
31090 __asm__ __volatile__(
31091@@ -110,36 +112,36 @@ __copy_user_intel(void __user *to, const void *from, unsigned long size)
31092 " .align 2,0x90\n"
31093 "3: movl 0(%4), %%eax\n"
31094 "4: movl 4(%4), %%edx\n"
31095- "5: movl %%eax, 0(%3)\n"
31096- "6: movl %%edx, 4(%3)\n"
31097+ "5: "__copyuser_seg" movl %%eax, 0(%3)\n"
31098+ "6: "__copyuser_seg" movl %%edx, 4(%3)\n"
31099 "7: movl 8(%4), %%eax\n"
31100 "8: movl 12(%4),%%edx\n"
31101- "9: movl %%eax, 8(%3)\n"
31102- "10: movl %%edx, 12(%3)\n"
31103+ "9: "__copyuser_seg" movl %%eax, 8(%3)\n"
31104+ "10: "__copyuser_seg" movl %%edx, 12(%3)\n"
31105 "11: movl 16(%4), %%eax\n"
31106 "12: movl 20(%4), %%edx\n"
31107- "13: movl %%eax, 16(%3)\n"
31108- "14: movl %%edx, 20(%3)\n"
31109+ "13: "__copyuser_seg" movl %%eax, 16(%3)\n"
31110+ "14: "__copyuser_seg" movl %%edx, 20(%3)\n"
31111 "15: movl 24(%4), %%eax\n"
31112 "16: movl 28(%4), %%edx\n"
31113- "17: movl %%eax, 24(%3)\n"
31114- "18: movl %%edx, 28(%3)\n"
31115+ "17: "__copyuser_seg" movl %%eax, 24(%3)\n"
31116+ "18: "__copyuser_seg" movl %%edx, 28(%3)\n"
31117 "19: movl 32(%4), %%eax\n"
31118 "20: movl 36(%4), %%edx\n"
31119- "21: movl %%eax, 32(%3)\n"
31120- "22: movl %%edx, 36(%3)\n"
31121+ "21: "__copyuser_seg" movl %%eax, 32(%3)\n"
31122+ "22: "__copyuser_seg" movl %%edx, 36(%3)\n"
31123 "23: movl 40(%4), %%eax\n"
31124 "24: movl 44(%4), %%edx\n"
31125- "25: movl %%eax, 40(%3)\n"
31126- "26: movl %%edx, 44(%3)\n"
31127+ "25: "__copyuser_seg" movl %%eax, 40(%3)\n"
31128+ "26: "__copyuser_seg" movl %%edx, 44(%3)\n"
31129 "27: movl 48(%4), %%eax\n"
31130 "28: movl 52(%4), %%edx\n"
31131- "29: movl %%eax, 48(%3)\n"
31132- "30: movl %%edx, 52(%3)\n"
31133+ "29: "__copyuser_seg" movl %%eax, 48(%3)\n"
31134+ "30: "__copyuser_seg" movl %%edx, 52(%3)\n"
31135 "31: movl 56(%4), %%eax\n"
31136 "32: movl 60(%4), %%edx\n"
31137- "33: movl %%eax, 56(%3)\n"
31138- "34: movl %%edx, 60(%3)\n"
31139+ "33: "__copyuser_seg" movl %%eax, 56(%3)\n"
31140+ "34: "__copyuser_seg" movl %%edx, 60(%3)\n"
31141 " addl $-64, %0\n"
31142 " addl $64, %4\n"
31143 " addl $64, %3\n"
31144@@ -149,10 +151,116 @@ __copy_user_intel(void __user *to, const void *from, unsigned long size)
31145 " shrl $2, %0\n"
31146 " andl $3, %%eax\n"
31147 " cld\n"
31148+ __COPYUSER_SET_ES
31149 "99: rep; movsl\n"
31150 "36: movl %%eax, %0\n"
31151 "37: rep; movsb\n"
31152 "100:\n"
31153+ __COPYUSER_RESTORE_ES
31154+ ".section .fixup,\"ax\"\n"
31155+ "101: lea 0(%%eax,%0,4),%0\n"
31156+ " jmp 100b\n"
31157+ ".previous\n"
31158+ _ASM_EXTABLE(1b,100b)
31159+ _ASM_EXTABLE(2b,100b)
31160+ _ASM_EXTABLE(3b,100b)
31161+ _ASM_EXTABLE(4b,100b)
31162+ _ASM_EXTABLE(5b,100b)
31163+ _ASM_EXTABLE(6b,100b)
31164+ _ASM_EXTABLE(7b,100b)
31165+ _ASM_EXTABLE(8b,100b)
31166+ _ASM_EXTABLE(9b,100b)
31167+ _ASM_EXTABLE(10b,100b)
31168+ _ASM_EXTABLE(11b,100b)
31169+ _ASM_EXTABLE(12b,100b)
31170+ _ASM_EXTABLE(13b,100b)
31171+ _ASM_EXTABLE(14b,100b)
31172+ _ASM_EXTABLE(15b,100b)
31173+ _ASM_EXTABLE(16b,100b)
31174+ _ASM_EXTABLE(17b,100b)
31175+ _ASM_EXTABLE(18b,100b)
31176+ _ASM_EXTABLE(19b,100b)
31177+ _ASM_EXTABLE(20b,100b)
31178+ _ASM_EXTABLE(21b,100b)
31179+ _ASM_EXTABLE(22b,100b)
31180+ _ASM_EXTABLE(23b,100b)
31181+ _ASM_EXTABLE(24b,100b)
31182+ _ASM_EXTABLE(25b,100b)
31183+ _ASM_EXTABLE(26b,100b)
31184+ _ASM_EXTABLE(27b,100b)
31185+ _ASM_EXTABLE(28b,100b)
31186+ _ASM_EXTABLE(29b,100b)
31187+ _ASM_EXTABLE(30b,100b)
31188+ _ASM_EXTABLE(31b,100b)
31189+ _ASM_EXTABLE(32b,100b)
31190+ _ASM_EXTABLE(33b,100b)
31191+ _ASM_EXTABLE(34b,100b)
31192+ _ASM_EXTABLE(35b,100b)
31193+ _ASM_EXTABLE(36b,100b)
31194+ _ASM_EXTABLE(37b,100b)
31195+ _ASM_EXTABLE(99b,101b)
31196+ : "=&c"(size), "=&D" (d0), "=&S" (d1)
31197+ : "1"(to), "2"(from), "0"(size)
31198+ : "eax", "edx", "memory");
31199+ return size;
31200+}
31201+
31202+static unsigned long
31203+__generic_copy_from_user_intel(void *to, const void __user *from, unsigned long size)
31204+{
31205+ int d0, d1;
31206+ __asm__ __volatile__(
31207+ " .align 2,0x90\n"
31208+ "1: "__copyuser_seg" movl 32(%4), %%eax\n"
31209+ " cmpl $67, %0\n"
31210+ " jbe 3f\n"
31211+ "2: "__copyuser_seg" movl 64(%4), %%eax\n"
31212+ " .align 2,0x90\n"
31213+ "3: "__copyuser_seg" movl 0(%4), %%eax\n"
31214+ "4: "__copyuser_seg" movl 4(%4), %%edx\n"
31215+ "5: movl %%eax, 0(%3)\n"
31216+ "6: movl %%edx, 4(%3)\n"
31217+ "7: "__copyuser_seg" movl 8(%4), %%eax\n"
31218+ "8: "__copyuser_seg" movl 12(%4),%%edx\n"
31219+ "9: movl %%eax, 8(%3)\n"
31220+ "10: movl %%edx, 12(%3)\n"
31221+ "11: "__copyuser_seg" movl 16(%4), %%eax\n"
31222+ "12: "__copyuser_seg" movl 20(%4), %%edx\n"
31223+ "13: movl %%eax, 16(%3)\n"
31224+ "14: movl %%edx, 20(%3)\n"
31225+ "15: "__copyuser_seg" movl 24(%4), %%eax\n"
31226+ "16: "__copyuser_seg" movl 28(%4), %%edx\n"
31227+ "17: movl %%eax, 24(%3)\n"
31228+ "18: movl %%edx, 28(%3)\n"
31229+ "19: "__copyuser_seg" movl 32(%4), %%eax\n"
31230+ "20: "__copyuser_seg" movl 36(%4), %%edx\n"
31231+ "21: movl %%eax, 32(%3)\n"
31232+ "22: movl %%edx, 36(%3)\n"
31233+ "23: "__copyuser_seg" movl 40(%4), %%eax\n"
31234+ "24: "__copyuser_seg" movl 44(%4), %%edx\n"
31235+ "25: movl %%eax, 40(%3)\n"
31236+ "26: movl %%edx, 44(%3)\n"
31237+ "27: "__copyuser_seg" movl 48(%4), %%eax\n"
31238+ "28: "__copyuser_seg" movl 52(%4), %%edx\n"
31239+ "29: movl %%eax, 48(%3)\n"
31240+ "30: movl %%edx, 52(%3)\n"
31241+ "31: "__copyuser_seg" movl 56(%4), %%eax\n"
31242+ "32: "__copyuser_seg" movl 60(%4), %%edx\n"
31243+ "33: movl %%eax, 56(%3)\n"
31244+ "34: movl %%edx, 60(%3)\n"
31245+ " addl $-64, %0\n"
31246+ " addl $64, %4\n"
31247+ " addl $64, %3\n"
31248+ " cmpl $63, %0\n"
31249+ " ja 1b\n"
31250+ "35: movl %0, %%eax\n"
31251+ " shrl $2, %0\n"
31252+ " andl $3, %%eax\n"
31253+ " cld\n"
31254+ "99: rep; "__copyuser_seg" movsl\n"
31255+ "36: movl %%eax, %0\n"
31256+ "37: rep; "__copyuser_seg" movsb\n"
31257+ "100:\n"
31258 ".section .fixup,\"ax\"\n"
31259 "101: lea 0(%%eax,%0,4),%0\n"
31260 " jmp 100b\n"
31261@@ -207,41 +315,41 @@ __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
31262 int d0, d1;
31263 __asm__ __volatile__(
31264 " .align 2,0x90\n"
31265- "0: movl 32(%4), %%eax\n"
31266+ "0: "__copyuser_seg" movl 32(%4), %%eax\n"
31267 " cmpl $67, %0\n"
31268 " jbe 2f\n"
31269- "1: movl 64(%4), %%eax\n"
31270+ "1: "__copyuser_seg" movl 64(%4), %%eax\n"
31271 " .align 2,0x90\n"
31272- "2: movl 0(%4), %%eax\n"
31273- "21: movl 4(%4), %%edx\n"
31274+ "2: "__copyuser_seg" movl 0(%4), %%eax\n"
31275+ "21: "__copyuser_seg" movl 4(%4), %%edx\n"
31276 " movl %%eax, 0(%3)\n"
31277 " movl %%edx, 4(%3)\n"
31278- "3: movl 8(%4), %%eax\n"
31279- "31: movl 12(%4),%%edx\n"
31280+ "3: "__copyuser_seg" movl 8(%4), %%eax\n"
31281+ "31: "__copyuser_seg" movl 12(%4),%%edx\n"
31282 " movl %%eax, 8(%3)\n"
31283 " movl %%edx, 12(%3)\n"
31284- "4: movl 16(%4), %%eax\n"
31285- "41: movl 20(%4), %%edx\n"
31286+ "4: "__copyuser_seg" movl 16(%4), %%eax\n"
31287+ "41: "__copyuser_seg" movl 20(%4), %%edx\n"
31288 " movl %%eax, 16(%3)\n"
31289 " movl %%edx, 20(%3)\n"
31290- "10: movl 24(%4), %%eax\n"
31291- "51: movl 28(%4), %%edx\n"
31292+ "10: "__copyuser_seg" movl 24(%4), %%eax\n"
31293+ "51: "__copyuser_seg" movl 28(%4), %%edx\n"
31294 " movl %%eax, 24(%3)\n"
31295 " movl %%edx, 28(%3)\n"
31296- "11: movl 32(%4), %%eax\n"
31297- "61: movl 36(%4), %%edx\n"
31298+ "11: "__copyuser_seg" movl 32(%4), %%eax\n"
31299+ "61: "__copyuser_seg" movl 36(%4), %%edx\n"
31300 " movl %%eax, 32(%3)\n"
31301 " movl %%edx, 36(%3)\n"
31302- "12: movl 40(%4), %%eax\n"
31303- "71: movl 44(%4), %%edx\n"
31304+ "12: "__copyuser_seg" movl 40(%4), %%eax\n"
31305+ "71: "__copyuser_seg" movl 44(%4), %%edx\n"
31306 " movl %%eax, 40(%3)\n"
31307 " movl %%edx, 44(%3)\n"
31308- "13: movl 48(%4), %%eax\n"
31309- "81: movl 52(%4), %%edx\n"
31310+ "13: "__copyuser_seg" movl 48(%4), %%eax\n"
31311+ "81: "__copyuser_seg" movl 52(%4), %%edx\n"
31312 " movl %%eax, 48(%3)\n"
31313 " movl %%edx, 52(%3)\n"
31314- "14: movl 56(%4), %%eax\n"
31315- "91: movl 60(%4), %%edx\n"
31316+ "14: "__copyuser_seg" movl 56(%4), %%eax\n"
31317+ "91: "__copyuser_seg" movl 60(%4), %%edx\n"
31318 " movl %%eax, 56(%3)\n"
31319 " movl %%edx, 60(%3)\n"
31320 " addl $-64, %0\n"
31321@@ -253,9 +361,9 @@ __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
31322 " shrl $2, %0\n"
31323 " andl $3, %%eax\n"
31324 " cld\n"
31325- "6: rep; movsl\n"
31326+ "6: rep; "__copyuser_seg" movsl\n"
31327 " movl %%eax,%0\n"
31328- "7: rep; movsb\n"
31329+ "7: rep; "__copyuser_seg" movsb\n"
31330 "8:\n"
31331 ".section .fixup,\"ax\"\n"
31332 "9: lea 0(%%eax,%0,4),%0\n"
31333@@ -305,41 +413,41 @@ static unsigned long __copy_user_zeroing_intel_nocache(void *to,
31334
31335 __asm__ __volatile__(
31336 " .align 2,0x90\n"
31337- "0: movl 32(%4), %%eax\n"
31338+ "0: "__copyuser_seg" movl 32(%4), %%eax\n"
31339 " cmpl $67, %0\n"
31340 " jbe 2f\n"
31341- "1: movl 64(%4), %%eax\n"
31342+ "1: "__copyuser_seg" movl 64(%4), %%eax\n"
31343 " .align 2,0x90\n"
31344- "2: movl 0(%4), %%eax\n"
31345- "21: movl 4(%4), %%edx\n"
31346+ "2: "__copyuser_seg" movl 0(%4), %%eax\n"
31347+ "21: "__copyuser_seg" movl 4(%4), %%edx\n"
31348 " movnti %%eax, 0(%3)\n"
31349 " movnti %%edx, 4(%3)\n"
31350- "3: movl 8(%4), %%eax\n"
31351- "31: movl 12(%4),%%edx\n"
31352+ "3: "__copyuser_seg" movl 8(%4), %%eax\n"
31353+ "31: "__copyuser_seg" movl 12(%4),%%edx\n"
31354 " movnti %%eax, 8(%3)\n"
31355 " movnti %%edx, 12(%3)\n"
31356- "4: movl 16(%4), %%eax\n"
31357- "41: movl 20(%4), %%edx\n"
31358+ "4: "__copyuser_seg" movl 16(%4), %%eax\n"
31359+ "41: "__copyuser_seg" movl 20(%4), %%edx\n"
31360 " movnti %%eax, 16(%3)\n"
31361 " movnti %%edx, 20(%3)\n"
31362- "10: movl 24(%4), %%eax\n"
31363- "51: movl 28(%4), %%edx\n"
31364+ "10: "__copyuser_seg" movl 24(%4), %%eax\n"
31365+ "51: "__copyuser_seg" movl 28(%4), %%edx\n"
31366 " movnti %%eax, 24(%3)\n"
31367 " movnti %%edx, 28(%3)\n"
31368- "11: movl 32(%4), %%eax\n"
31369- "61: movl 36(%4), %%edx\n"
31370+ "11: "__copyuser_seg" movl 32(%4), %%eax\n"
31371+ "61: "__copyuser_seg" movl 36(%4), %%edx\n"
31372 " movnti %%eax, 32(%3)\n"
31373 " movnti %%edx, 36(%3)\n"
31374- "12: movl 40(%4), %%eax\n"
31375- "71: movl 44(%4), %%edx\n"
31376+ "12: "__copyuser_seg" movl 40(%4), %%eax\n"
31377+ "71: "__copyuser_seg" movl 44(%4), %%edx\n"
31378 " movnti %%eax, 40(%3)\n"
31379 " movnti %%edx, 44(%3)\n"
31380- "13: movl 48(%4), %%eax\n"
31381- "81: movl 52(%4), %%edx\n"
31382+ "13: "__copyuser_seg" movl 48(%4), %%eax\n"
31383+ "81: "__copyuser_seg" movl 52(%4), %%edx\n"
31384 " movnti %%eax, 48(%3)\n"
31385 " movnti %%edx, 52(%3)\n"
31386- "14: movl 56(%4), %%eax\n"
31387- "91: movl 60(%4), %%edx\n"
31388+ "14: "__copyuser_seg" movl 56(%4), %%eax\n"
31389+ "91: "__copyuser_seg" movl 60(%4), %%edx\n"
31390 " movnti %%eax, 56(%3)\n"
31391 " movnti %%edx, 60(%3)\n"
31392 " addl $-64, %0\n"
31393@@ -352,9 +460,9 @@ static unsigned long __copy_user_zeroing_intel_nocache(void *to,
31394 " shrl $2, %0\n"
31395 " andl $3, %%eax\n"
31396 " cld\n"
31397- "6: rep; movsl\n"
31398+ "6: rep; "__copyuser_seg" movsl\n"
31399 " movl %%eax,%0\n"
31400- "7: rep; movsb\n"
31401+ "7: rep; "__copyuser_seg" movsb\n"
31402 "8:\n"
31403 ".section .fixup,\"ax\"\n"
31404 "9: lea 0(%%eax,%0,4),%0\n"
31405@@ -399,41 +507,41 @@ static unsigned long __copy_user_intel_nocache(void *to,
31406
31407 __asm__ __volatile__(
31408 " .align 2,0x90\n"
31409- "0: movl 32(%4), %%eax\n"
31410+ "0: "__copyuser_seg" movl 32(%4), %%eax\n"
31411 " cmpl $67, %0\n"
31412 " jbe 2f\n"
31413- "1: movl 64(%4), %%eax\n"
31414+ "1: "__copyuser_seg" movl 64(%4), %%eax\n"
31415 " .align 2,0x90\n"
31416- "2: movl 0(%4), %%eax\n"
31417- "21: movl 4(%4), %%edx\n"
31418+ "2: "__copyuser_seg" movl 0(%4), %%eax\n"
31419+ "21: "__copyuser_seg" movl 4(%4), %%edx\n"
31420 " movnti %%eax, 0(%3)\n"
31421 " movnti %%edx, 4(%3)\n"
31422- "3: movl 8(%4), %%eax\n"
31423- "31: movl 12(%4),%%edx\n"
31424+ "3: "__copyuser_seg" movl 8(%4), %%eax\n"
31425+ "31: "__copyuser_seg" movl 12(%4),%%edx\n"
31426 " movnti %%eax, 8(%3)\n"
31427 " movnti %%edx, 12(%3)\n"
31428- "4: movl 16(%4), %%eax\n"
31429- "41: movl 20(%4), %%edx\n"
31430+ "4: "__copyuser_seg" movl 16(%4), %%eax\n"
31431+ "41: "__copyuser_seg" movl 20(%4), %%edx\n"
31432 " movnti %%eax, 16(%3)\n"
31433 " movnti %%edx, 20(%3)\n"
31434- "10: movl 24(%4), %%eax\n"
31435- "51: movl 28(%4), %%edx\n"
31436+ "10: "__copyuser_seg" movl 24(%4), %%eax\n"
31437+ "51: "__copyuser_seg" movl 28(%4), %%edx\n"
31438 " movnti %%eax, 24(%3)\n"
31439 " movnti %%edx, 28(%3)\n"
31440- "11: movl 32(%4), %%eax\n"
31441- "61: movl 36(%4), %%edx\n"
31442+ "11: "__copyuser_seg" movl 32(%4), %%eax\n"
31443+ "61: "__copyuser_seg" movl 36(%4), %%edx\n"
31444 " movnti %%eax, 32(%3)\n"
31445 " movnti %%edx, 36(%3)\n"
31446- "12: movl 40(%4), %%eax\n"
31447- "71: movl 44(%4), %%edx\n"
31448+ "12: "__copyuser_seg" movl 40(%4), %%eax\n"
31449+ "71: "__copyuser_seg" movl 44(%4), %%edx\n"
31450 " movnti %%eax, 40(%3)\n"
31451 " movnti %%edx, 44(%3)\n"
31452- "13: movl 48(%4), %%eax\n"
31453- "81: movl 52(%4), %%edx\n"
31454+ "13: "__copyuser_seg" movl 48(%4), %%eax\n"
31455+ "81: "__copyuser_seg" movl 52(%4), %%edx\n"
31456 " movnti %%eax, 48(%3)\n"
31457 " movnti %%edx, 52(%3)\n"
31458- "14: movl 56(%4), %%eax\n"
31459- "91: movl 60(%4), %%edx\n"
31460+ "14: "__copyuser_seg" movl 56(%4), %%eax\n"
31461+ "91: "__copyuser_seg" movl 60(%4), %%edx\n"
31462 " movnti %%eax, 56(%3)\n"
31463 " movnti %%edx, 60(%3)\n"
31464 " addl $-64, %0\n"
31465@@ -446,9 +554,9 @@ static unsigned long __copy_user_intel_nocache(void *to,
31466 " shrl $2, %0\n"
31467 " andl $3, %%eax\n"
31468 " cld\n"
31469- "6: rep; movsl\n"
31470+ "6: rep; "__copyuser_seg" movsl\n"
31471 " movl %%eax,%0\n"
31472- "7: rep; movsb\n"
31473+ "7: rep; "__copyuser_seg" movsb\n"
31474 "8:\n"
31475 ".section .fixup,\"ax\"\n"
31476 "9: lea 0(%%eax,%0,4),%0\n"
31477@@ -488,32 +596,36 @@ static unsigned long __copy_user_intel_nocache(void *to,
31478 */
31479 unsigned long __copy_user_zeroing_intel(void *to, const void __user *from,
31480 unsigned long size);
31481-unsigned long __copy_user_intel(void __user *to, const void *from,
31482+unsigned long __generic_copy_to_user_intel(void __user *to, const void *from,
31483+ unsigned long size);
31484+unsigned long __generic_copy_from_user_intel(void *to, const void __user *from,
31485 unsigned long size);
31486 unsigned long __copy_user_zeroing_intel_nocache(void *to,
31487 const void __user *from, unsigned long size);
31488 #endif /* CONFIG_X86_INTEL_USERCOPY */
31489
31490 /* Generic arbitrary sized copy. */
31491-#define __copy_user(to, from, size) \
31492+#define __copy_user(to, from, size, prefix, set, restore) \
31493 do { \
31494 int __d0, __d1, __d2; \
31495 __asm__ __volatile__( \
31496+ set \
31497 " cmp $7,%0\n" \
31498 " jbe 1f\n" \
31499 " movl %1,%0\n" \
31500 " negl %0\n" \
31501 " andl $7,%0\n" \
31502 " subl %0,%3\n" \
31503- "4: rep; movsb\n" \
31504+ "4: rep; "prefix"movsb\n" \
31505 " movl %3,%0\n" \
31506 " shrl $2,%0\n" \
31507 " andl $3,%3\n" \
31508 " .align 2,0x90\n" \
31509- "0: rep; movsl\n" \
31510+ "0: rep; "prefix"movsl\n" \
31511 " movl %3,%0\n" \
31512- "1: rep; movsb\n" \
31513+ "1: rep; "prefix"movsb\n" \
31514 "2:\n" \
31515+ restore \
31516 ".section .fixup,\"ax\"\n" \
31517 "5: addl %3,%0\n" \
31518 " jmp 2b\n" \
31519@@ -538,14 +650,14 @@ do { \
31520 " negl %0\n" \
31521 " andl $7,%0\n" \
31522 " subl %0,%3\n" \
31523- "4: rep; movsb\n" \
31524+ "4: rep; "__copyuser_seg"movsb\n" \
31525 " movl %3,%0\n" \
31526 " shrl $2,%0\n" \
31527 " andl $3,%3\n" \
31528 " .align 2,0x90\n" \
31529- "0: rep; movsl\n" \
31530+ "0: rep; "__copyuser_seg"movsl\n" \
31531 " movl %3,%0\n" \
31532- "1: rep; movsb\n" \
31533+ "1: rep; "__copyuser_seg"movsb\n" \
31534 "2:\n" \
31535 ".section .fixup,\"ax\"\n" \
31536 "5: addl %3,%0\n" \
31537@@ -572,9 +684,9 @@ unsigned long __copy_to_user_ll(void __user *to, const void *from,
31538 {
31539 stac();
31540 if (movsl_is_ok(to, from, n))
31541- __copy_user(to, from, n);
31542+ __copy_user(to, from, n, "", __COPYUSER_SET_ES, __COPYUSER_RESTORE_ES);
31543 else
31544- n = __copy_user_intel(to, from, n);
31545+ n = __generic_copy_to_user_intel(to, from, n);
31546 clac();
31547 return n;
31548 }
31549@@ -598,10 +710,9 @@ unsigned long __copy_from_user_ll_nozero(void *to, const void __user *from,
31550 {
31551 stac();
31552 if (movsl_is_ok(to, from, n))
31553- __copy_user(to, from, n);
31554+ __copy_user(to, from, n, __copyuser_seg, "", "");
31555 else
31556- n = __copy_user_intel((void __user *)to,
31557- (const void *)from, n);
31558+ n = __generic_copy_from_user_intel(to, from, n);
31559 clac();
31560 return n;
31561 }
31562@@ -632,58 +743,38 @@ unsigned long __copy_from_user_ll_nocache_nozero(void *to, const void __user *fr
31563 if (n > 64 && cpu_has_xmm2)
31564 n = __copy_user_intel_nocache(to, from, n);
31565 else
31566- __copy_user(to, from, n);
31567+ __copy_user(to, from, n, __copyuser_seg, "", "");
31568 #else
31569- __copy_user(to, from, n);
31570+ __copy_user(to, from, n, __copyuser_seg, "", "");
31571 #endif
31572 clac();
31573 return n;
31574 }
31575 EXPORT_SYMBOL(__copy_from_user_ll_nocache_nozero);
31576
31577-/**
31578- * copy_to_user: - Copy a block of data into user space.
31579- * @to: Destination address, in user space.
31580- * @from: Source address, in kernel space.
31581- * @n: Number of bytes to copy.
31582- *
31583- * Context: User context only. This function may sleep.
31584- *
31585- * Copy data from kernel space to user space.
31586- *
31587- * Returns number of bytes that could not be copied.
31588- * On success, this will be zero.
31589- */
31590-unsigned long _copy_to_user(void __user *to, const void *from, unsigned n)
31591+#ifdef CONFIG_PAX_MEMORY_UDEREF
31592+void __set_fs(mm_segment_t x)
31593 {
31594- if (access_ok(VERIFY_WRITE, to, n))
31595- n = __copy_to_user(to, from, n);
31596- return n;
31597+ switch (x.seg) {
31598+ case 0:
31599+ loadsegment(gs, 0);
31600+ break;
31601+ case TASK_SIZE_MAX:
31602+ loadsegment(gs, __USER_DS);
31603+ break;
31604+ case -1UL:
31605+ loadsegment(gs, __KERNEL_DS);
31606+ break;
31607+ default:
31608+ BUG();
31609+ }
31610 }
31611-EXPORT_SYMBOL(_copy_to_user);
31612+EXPORT_SYMBOL(__set_fs);
31613
31614-/**
31615- * copy_from_user: - Copy a block of data from user space.
31616- * @to: Destination address, in kernel space.
31617- * @from: Source address, in user space.
31618- * @n: Number of bytes to copy.
31619- *
31620- * Context: User context only. This function may sleep.
31621- *
31622- * Copy data from user space to kernel space.
31623- *
31624- * Returns number of bytes that could not be copied.
31625- * On success, this will be zero.
31626- *
31627- * If some data could not be copied, this function will pad the copied
31628- * data to the requested size using zero bytes.
31629- */
31630-unsigned long _copy_from_user(void *to, const void __user *from, unsigned n)
31631+void set_fs(mm_segment_t x)
31632 {
31633- if (access_ok(VERIFY_READ, from, n))
31634- n = __copy_from_user(to, from, n);
31635- else
31636- memset(to, 0, n);
31637- return n;
31638+ current_thread_info()->addr_limit = x;
31639+ __set_fs(x);
31640 }
31641-EXPORT_SYMBOL(_copy_from_user);
31642+EXPORT_SYMBOL(set_fs);
31643+#endif
31644diff --git a/arch/x86/lib/usercopy_64.c b/arch/x86/lib/usercopy_64.c
31645index 0a42327..7a82465 100644
31646--- a/arch/x86/lib/usercopy_64.c
31647+++ b/arch/x86/lib/usercopy_64.c
31648@@ -18,6 +18,7 @@ unsigned long __clear_user(void __user *addr, unsigned long size)
31649 might_fault();
31650 /* no memory constraint because it doesn't change any memory gcc knows
31651 about */
31652+ pax_open_userland();
31653 stac();
31654 asm volatile(
31655 " testq %[size8],%[size8]\n"
31656@@ -39,9 +40,10 @@ unsigned long __clear_user(void __user *addr, unsigned long size)
31657 _ASM_EXTABLE(0b,3b)
31658 _ASM_EXTABLE(1b,2b)
31659 : [size8] "=&c"(size), [dst] "=&D" (__d0)
31660- : [size1] "r"(size & 7), "[size8]" (size / 8), "[dst]"(addr),
31661+ : [size1] "r"(size & 7), "[size8]" (size / 8), "[dst]"(____m(addr)),
31662 [zero] "r" (0UL), [eight] "r" (8UL));
31663 clac();
31664+ pax_close_userland();
31665 return size;
31666 }
31667 EXPORT_SYMBOL(__clear_user);
31668@@ -54,12 +56,11 @@ unsigned long clear_user(void __user *to, unsigned long n)
31669 }
31670 EXPORT_SYMBOL(clear_user);
31671
31672-unsigned long copy_in_user(void __user *to, const void __user *from, unsigned len)
31673+unsigned long copy_in_user(void __user *to, const void __user *from, unsigned long len)
31674 {
31675- if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len)) {
31676- return copy_user_generic((__force void *)to, (__force void *)from, len);
31677- }
31678- return len;
31679+ if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len))
31680+ return copy_user_generic((void __force_kernel *)____m(to), (void __force_kernel *)____m(from), len);
31681+ return len;
31682 }
31683 EXPORT_SYMBOL(copy_in_user);
31684
31685@@ -69,8 +70,10 @@ EXPORT_SYMBOL(copy_in_user);
31686 * it is not necessary to optimize tail handling.
31687 */
31688 __visible unsigned long
31689-copy_user_handle_tail(char *to, char *from, unsigned len)
31690+copy_user_handle_tail(char __user *to, char __user *from, unsigned long len)
31691 {
31692+ clac();
31693+ pax_close_userland();
31694 for (; len; --len, to++) {
31695 char c;
31696
31697@@ -79,10 +82,9 @@ copy_user_handle_tail(char *to, char *from, unsigned len)
31698 if (__put_user_nocheck(c, to, sizeof(char)))
31699 break;
31700 }
31701- clac();
31702
31703 /* If the destination is a kernel buffer, we always clear the end */
31704- if (!__addr_ok(to))
31705+ if (!__addr_ok(to) && (unsigned long)to >= TASK_SIZE_MAX + pax_user_shadow_base)
31706 memset(to, 0, len);
31707 return len;
31708 }
31709diff --git a/arch/x86/mm/Makefile b/arch/x86/mm/Makefile
31710index c4cc740..60a7362 100644
31711--- a/arch/x86/mm/Makefile
31712+++ b/arch/x86/mm/Makefile
31713@@ -35,3 +35,7 @@ obj-$(CONFIG_NUMA_EMU) += numa_emulation.o
31714 obj-$(CONFIG_MEMTEST) += memtest.o
31715
31716 obj-$(CONFIG_X86_INTEL_MPX) += mpx.o
31717+
31718+quote:="
31719+obj-$(CONFIG_X86_64) += uderef_64.o
31720+CFLAGS_uderef_64.o := $(subst $(quote),,$(CONFIG_ARCH_HWEIGHT_CFLAGS))
31721diff --git a/arch/x86/mm/extable.c b/arch/x86/mm/extable.c
31722index 903ec1e..c4166b2 100644
31723--- a/arch/x86/mm/extable.c
31724+++ b/arch/x86/mm/extable.c
31725@@ -6,12 +6,24 @@
31726 static inline unsigned long
31727 ex_insn_addr(const struct exception_table_entry *x)
31728 {
31729- return (unsigned long)&x->insn + x->insn;
31730+ unsigned long reloc = 0;
31731+
31732+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
31733+ reloc = ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
31734+#endif
31735+
31736+ return (unsigned long)&x->insn + x->insn + reloc;
31737 }
31738 static inline unsigned long
31739 ex_fixup_addr(const struct exception_table_entry *x)
31740 {
31741- return (unsigned long)&x->fixup + x->fixup;
31742+ unsigned long reloc = 0;
31743+
31744+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
31745+ reloc = ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
31746+#endif
31747+
31748+ return (unsigned long)&x->fixup + x->fixup + reloc;
31749 }
31750
31751 int fixup_exception(struct pt_regs *regs)
31752@@ -20,7 +32,7 @@ int fixup_exception(struct pt_regs *regs)
31753 unsigned long new_ip;
31754
31755 #ifdef CONFIG_PNPBIOS
31756- if (unlikely(SEGMENT_IS_PNP_CODE(regs->cs))) {
31757+ if (unlikely(!v8086_mode(regs) && SEGMENT_IS_PNP_CODE(regs->cs))) {
31758 extern u32 pnp_bios_fault_eip, pnp_bios_fault_esp;
31759 extern u32 pnp_bios_is_utter_crap;
31760 pnp_bios_is_utter_crap = 1;
31761@@ -145,6 +157,13 @@ void sort_extable(struct exception_table_entry *start,
31762 i += 4;
31763 p->fixup -= i;
31764 i += 4;
31765+
31766+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
31767+ BUILD_BUG_ON(!IS_ENABLED(CONFIG_BUILDTIME_EXTABLE_SORT));
31768+ p->insn -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
31769+ p->fixup -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
31770+#endif
31771+
31772 }
31773 }
31774
31775diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
31776index ede025f..380466b 100644
31777--- a/arch/x86/mm/fault.c
31778+++ b/arch/x86/mm/fault.c
31779@@ -13,12 +13,19 @@
31780 #include <linux/hugetlb.h> /* hstate_index_to_shift */
31781 #include <linux/prefetch.h> /* prefetchw */
31782 #include <linux/context_tracking.h> /* exception_enter(), ... */
31783+#include <linux/unistd.h>
31784+#include <linux/compiler.h>
31785
31786 #include <asm/traps.h> /* dotraplinkage, ... */
31787 #include <asm/pgalloc.h> /* pgd_*(), ... */
31788 #include <asm/kmemcheck.h> /* kmemcheck_*(), ... */
31789 #include <asm/fixmap.h> /* VSYSCALL_ADDR */
31790 #include <asm/vsyscall.h> /* emulate_vsyscall */
31791+#include <asm/tlbflush.h>
31792+
31793+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
31794+#include <asm/stacktrace.h>
31795+#endif
31796
31797 #define CREATE_TRACE_POINTS
31798 #include <asm/trace/exceptions.h>
31799@@ -59,7 +66,7 @@ static nokprobe_inline int kprobes_fault(struct pt_regs *regs)
31800 int ret = 0;
31801
31802 /* kprobe_running() needs smp_processor_id() */
31803- if (kprobes_built_in() && !user_mode_vm(regs)) {
31804+ if (kprobes_built_in() && !user_mode(regs)) {
31805 preempt_disable();
31806 if (kprobe_running() && kprobe_fault_handler(regs, 14))
31807 ret = 1;
31808@@ -120,7 +127,10 @@ check_prefetch_opcode(struct pt_regs *regs, unsigned char *instr,
31809 return !instr_lo || (instr_lo>>1) == 1;
31810 case 0x00:
31811 /* Prefetch instruction is 0x0F0D or 0x0F18 */
31812- if (probe_kernel_address(instr, opcode))
31813+ if (user_mode(regs)) {
31814+ if (__copy_from_user_inatomic(&opcode, (unsigned char __force_user *)(instr), 1))
31815+ return 0;
31816+ } else if (probe_kernel_address(instr, opcode))
31817 return 0;
31818
31819 *prefetch = (instr_lo == 0xF) &&
31820@@ -154,7 +164,10 @@ is_prefetch(struct pt_regs *regs, unsigned long error_code, unsigned long addr)
31821 while (instr < max_instr) {
31822 unsigned char opcode;
31823
31824- if (probe_kernel_address(instr, opcode))
31825+ if (user_mode(regs)) {
31826+ if (__copy_from_user_inatomic(&opcode, (unsigned char __force_user *)(instr), 1))
31827+ break;
31828+ } else if (probe_kernel_address(instr, opcode))
31829 break;
31830
31831 instr++;
31832@@ -185,6 +198,34 @@ force_sig_info_fault(int si_signo, int si_code, unsigned long address,
31833 force_sig_info(si_signo, &info, tsk);
31834 }
31835
31836+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
31837+static bool pax_is_fetch_fault(struct pt_regs *regs, unsigned long error_code, unsigned long address);
31838+#endif
31839+
31840+#ifdef CONFIG_PAX_EMUTRAMP
31841+static int pax_handle_fetch_fault(struct pt_regs *regs);
31842+#endif
31843+
31844+#ifdef CONFIG_PAX_PAGEEXEC
31845+static inline pmd_t * pax_get_pmd(struct mm_struct *mm, unsigned long address)
31846+{
31847+ pgd_t *pgd;
31848+ pud_t *pud;
31849+ pmd_t *pmd;
31850+
31851+ pgd = pgd_offset(mm, address);
31852+ if (!pgd_present(*pgd))
31853+ return NULL;
31854+ pud = pud_offset(pgd, address);
31855+ if (!pud_present(*pud))
31856+ return NULL;
31857+ pmd = pmd_offset(pud, address);
31858+ if (!pmd_present(*pmd))
31859+ return NULL;
31860+ return pmd;
31861+}
31862+#endif
31863+
31864 DEFINE_SPINLOCK(pgd_lock);
31865 LIST_HEAD(pgd_list);
31866
31867@@ -235,10 +276,27 @@ void vmalloc_sync_all(void)
31868 for (address = VMALLOC_START & PMD_MASK;
31869 address >= TASK_SIZE && address < FIXADDR_TOP;
31870 address += PMD_SIZE) {
31871+
31872+#ifdef CONFIG_PAX_PER_CPU_PGD
31873+ unsigned long cpu;
31874+#else
31875 struct page *page;
31876+#endif
31877
31878 spin_lock(&pgd_lock);
31879+
31880+#ifdef CONFIG_PAX_PER_CPU_PGD
31881+ for (cpu = 0; cpu < nr_cpu_ids; ++cpu) {
31882+ pgd_t *pgd = get_cpu_pgd(cpu, user);
31883+ pmd_t *ret;
31884+
31885+ ret = vmalloc_sync_one(pgd, address);
31886+ if (!ret)
31887+ break;
31888+ pgd = get_cpu_pgd(cpu, kernel);
31889+#else
31890 list_for_each_entry(page, &pgd_list, lru) {
31891+ pgd_t *pgd;
31892 spinlock_t *pgt_lock;
31893 pmd_t *ret;
31894
31895@@ -246,8 +304,14 @@ void vmalloc_sync_all(void)
31896 pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
31897
31898 spin_lock(pgt_lock);
31899- ret = vmalloc_sync_one(page_address(page), address);
31900+ pgd = page_address(page);
31901+#endif
31902+
31903+ ret = vmalloc_sync_one(pgd, address);
31904+
31905+#ifndef CONFIG_PAX_PER_CPU_PGD
31906 spin_unlock(pgt_lock);
31907+#endif
31908
31909 if (!ret)
31910 break;
31911@@ -281,6 +345,12 @@ static noinline int vmalloc_fault(unsigned long address)
31912 * an interrupt in the middle of a task switch..
31913 */
31914 pgd_paddr = read_cr3();
31915+
31916+#ifdef CONFIG_PAX_PER_CPU_PGD
31917+ BUG_ON(__pa(get_cpu_pgd(smp_processor_id(), kernel)) != (pgd_paddr & __PHYSICAL_MASK));
31918+ vmalloc_sync_one(__va(pgd_paddr + PAGE_SIZE), address);
31919+#endif
31920+
31921 pmd_k = vmalloc_sync_one(__va(pgd_paddr), address);
31922 if (!pmd_k)
31923 return -1;
31924@@ -377,11 +447,25 @@ static noinline int vmalloc_fault(unsigned long address)
31925 * happen within a race in page table update. In the later
31926 * case just flush:
31927 */
31928- pgd = pgd_offset(current->active_mm, address);
31929+
31930 pgd_ref = pgd_offset_k(address);
31931 if (pgd_none(*pgd_ref))
31932 return -1;
31933
31934+#ifdef CONFIG_PAX_PER_CPU_PGD
31935+ BUG_ON(__pa(get_cpu_pgd(smp_processor_id(), kernel)) != (read_cr3() & __PHYSICAL_MASK));
31936+ pgd = pgd_offset_cpu(smp_processor_id(), user, address);
31937+ if (pgd_none(*pgd)) {
31938+ set_pgd(pgd, *pgd_ref);
31939+ arch_flush_lazy_mmu_mode();
31940+ } else {
31941+ BUG_ON(pgd_page_vaddr(*pgd) != pgd_page_vaddr(*pgd_ref));
31942+ }
31943+ pgd = pgd_offset_cpu(smp_processor_id(), kernel, address);
31944+#else
31945+ pgd = pgd_offset(current->active_mm, address);
31946+#endif
31947+
31948 if (pgd_none(*pgd)) {
31949 set_pgd(pgd, *pgd_ref);
31950 arch_flush_lazy_mmu_mode();
31951@@ -548,7 +632,7 @@ static int is_errata93(struct pt_regs *regs, unsigned long address)
31952 static int is_errata100(struct pt_regs *regs, unsigned long address)
31953 {
31954 #ifdef CONFIG_X86_64
31955- if ((regs->cs == __USER32_CS || (regs->cs & (1<<2))) && (address >> 32))
31956+ if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT)) && (address >> 32))
31957 return 1;
31958 #endif
31959 return 0;
31960@@ -575,9 +659,9 @@ static int is_f00f_bug(struct pt_regs *regs, unsigned long address)
31961 }
31962
31963 static const char nx_warning[] = KERN_CRIT
31964-"kernel tried to execute NX-protected page - exploit attempt? (uid: %d)\n";
31965+"kernel tried to execute NX-protected page - exploit attempt? (uid: %d, task: %s, pid: %d)\n";
31966 static const char smep_warning[] = KERN_CRIT
31967-"unable to execute userspace code (SMEP?) (uid: %d)\n";
31968+"unable to execute userspace code (SMEP?) (uid: %d, task: %s, pid: %d)\n";
31969
31970 static void
31971 show_fault_oops(struct pt_regs *regs, unsigned long error_code,
31972@@ -586,7 +670,7 @@ show_fault_oops(struct pt_regs *regs, unsigned long error_code,
31973 if (!oops_may_print())
31974 return;
31975
31976- if (error_code & PF_INSTR) {
31977+ if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR)) {
31978 unsigned int level;
31979 pgd_t *pgd;
31980 pte_t *pte;
31981@@ -597,13 +681,25 @@ show_fault_oops(struct pt_regs *regs, unsigned long error_code,
31982 pte = lookup_address_in_pgd(pgd, address, &level);
31983
31984 if (pte && pte_present(*pte) && !pte_exec(*pte))
31985- printk(nx_warning, from_kuid(&init_user_ns, current_uid()));
31986+ printk(nx_warning, from_kuid_munged(&init_user_ns, current_uid()), current->comm, task_pid_nr(current));
31987 if (pte && pte_present(*pte) && pte_exec(*pte) &&
31988 (pgd_flags(*pgd) & _PAGE_USER) &&
31989 (__read_cr4() & X86_CR4_SMEP))
31990- printk(smep_warning, from_kuid(&init_user_ns, current_uid()));
31991+ printk(smep_warning, from_kuid(&init_user_ns, current_uid()), current->comm, task_pid_nr(current));
31992 }
31993
31994+#ifdef CONFIG_PAX_KERNEXEC
31995+ if (init_mm.start_code <= address && address < init_mm.end_code) {
31996+ if (current->signal->curr_ip)
31997+ printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n",
31998+ &current->signal->curr_ip, current->comm, task_pid_nr(current),
31999+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()));
32000+ else
32001+ printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n", current->comm, task_pid_nr(current),
32002+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()));
32003+ }
32004+#endif
32005+
32006 printk(KERN_ALERT "BUG: unable to handle kernel ");
32007 if (address < PAGE_SIZE)
32008 printk(KERN_CONT "NULL pointer dereference");
32009@@ -782,6 +878,22 @@ __bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
32010 return;
32011 }
32012 #endif
32013+
32014+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
32015+ if (pax_is_fetch_fault(regs, error_code, address)) {
32016+
32017+#ifdef CONFIG_PAX_EMUTRAMP
32018+ switch (pax_handle_fetch_fault(regs)) {
32019+ case 2:
32020+ return;
32021+ }
32022+#endif
32023+
32024+ pax_report_fault(regs, (void *)regs->ip, (void *)regs->sp);
32025+ do_group_exit(SIGKILL);
32026+ }
32027+#endif
32028+
32029 /* Kernel addresses are always protection faults: */
32030 if (address >= TASK_SIZE)
32031 error_code |= PF_PROT;
32032@@ -864,7 +976,7 @@ do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address,
32033 if (fault & (VM_FAULT_HWPOISON|VM_FAULT_HWPOISON_LARGE)) {
32034 printk(KERN_ERR
32035 "MCE: Killing %s:%d due to hardware memory corruption fault at %lx\n",
32036- tsk->comm, tsk->pid, address);
32037+ tsk->comm, task_pid_nr(tsk), address);
32038 code = BUS_MCEERR_AR;
32039 }
32040 #endif
32041@@ -916,6 +1028,99 @@ static int spurious_fault_check(unsigned long error_code, pte_t *pte)
32042 return 1;
32043 }
32044
32045+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
32046+static int pax_handle_pageexec_fault(struct pt_regs *regs, struct mm_struct *mm, unsigned long address, unsigned long error_code)
32047+{
32048+ pte_t *pte;
32049+ pmd_t *pmd;
32050+ spinlock_t *ptl;
32051+ unsigned char pte_mask;
32052+
32053+ if ((__supported_pte_mask & _PAGE_NX) || (error_code & (PF_PROT|PF_USER)) != (PF_PROT|PF_USER) || v8086_mode(regs) ||
32054+ !(mm->pax_flags & MF_PAX_PAGEEXEC))
32055+ return 0;
32056+
32057+ /* PaX: it's our fault, let's handle it if we can */
32058+
32059+ /* PaX: take a look at read faults before acquiring any locks */
32060+ if (unlikely(!(error_code & PF_WRITE) && (regs->ip == address))) {
32061+ /* instruction fetch attempt from a protected page in user mode */
32062+ up_read(&mm->mmap_sem);
32063+
32064+#ifdef CONFIG_PAX_EMUTRAMP
32065+ switch (pax_handle_fetch_fault(regs)) {
32066+ case 2:
32067+ return 1;
32068+ }
32069+#endif
32070+
32071+ pax_report_fault(regs, (void *)regs->ip, (void *)regs->sp);
32072+ do_group_exit(SIGKILL);
32073+ }
32074+
32075+ pmd = pax_get_pmd(mm, address);
32076+ if (unlikely(!pmd))
32077+ return 0;
32078+
32079+ pte = pte_offset_map_lock(mm, pmd, address, &ptl);
32080+ if (unlikely(!(pte_val(*pte) & _PAGE_PRESENT) || pte_user(*pte))) {
32081+ pte_unmap_unlock(pte, ptl);
32082+ return 0;
32083+ }
32084+
32085+ if (unlikely((error_code & PF_WRITE) && !pte_write(*pte))) {
32086+ /* write attempt to a protected page in user mode */
32087+ pte_unmap_unlock(pte, ptl);
32088+ return 0;
32089+ }
32090+
32091+#ifdef CONFIG_SMP
32092+ if (likely(address > get_limit(regs->cs) && cpu_isset(smp_processor_id(), mm->context.cpu_user_cs_mask)))
32093+#else
32094+ if (likely(address > get_limit(regs->cs)))
32095+#endif
32096+ {
32097+ set_pte(pte, pte_mkread(*pte));
32098+ __flush_tlb_one(address);
32099+ pte_unmap_unlock(pte, ptl);
32100+ up_read(&mm->mmap_sem);
32101+ return 1;
32102+ }
32103+
32104+ pte_mask = _PAGE_ACCESSED | _PAGE_USER | ((error_code & PF_WRITE) << (_PAGE_BIT_DIRTY-1));
32105+
32106+ /*
32107+ * PaX: fill DTLB with user rights and retry
32108+ */
32109+ __asm__ __volatile__ (
32110+ "orb %2,(%1)\n"
32111+#if defined(CONFIG_M586) || defined(CONFIG_M586TSC)
32112+/*
32113+ * PaX: let this uncommented 'invlpg' remind us on the behaviour of Intel's
32114+ * (and AMD's) TLBs. namely, they do not cache PTEs that would raise *any*
32115+ * page fault when examined during a TLB load attempt. this is true not only
32116+ * for PTEs holding a non-present entry but also present entries that will
32117+ * raise a page fault (such as those set up by PaX, or the copy-on-write
32118+ * mechanism). in effect it means that we do *not* need to flush the TLBs
32119+ * for our target pages since their PTEs are simply not in the TLBs at all.
32120+
32121+ * the best thing in omitting it is that we gain around 15-20% speed in the
32122+ * fast path of the page fault handler and can get rid of tracing since we
32123+ * can no longer flush unintended entries.
32124+ */
32125+ "invlpg (%0)\n"
32126+#endif
32127+ __copyuser_seg"testb $0,(%0)\n"
32128+ "xorb %3,(%1)\n"
32129+ :
32130+ : "r" (address), "r" (pte), "q" (pte_mask), "i" (_PAGE_USER)
32131+ : "memory", "cc");
32132+ pte_unmap_unlock(pte, ptl);
32133+ up_read(&mm->mmap_sem);
32134+ return 1;
32135+}
32136+#endif
32137+
32138 /*
32139 * Handle a spurious fault caused by a stale TLB entry.
32140 *
32141@@ -1001,6 +1206,9 @@ int show_unhandled_signals = 1;
32142 static inline int
32143 access_error(unsigned long error_code, struct vm_area_struct *vma)
32144 {
32145+ if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR) && !(vma->vm_flags & VM_EXEC))
32146+ return 1;
32147+
32148 if (error_code & PF_WRITE) {
32149 /* write, present and write, not present: */
32150 if (unlikely(!(vma->vm_flags & VM_WRITE)))
32151@@ -1035,7 +1243,7 @@ static inline bool smap_violation(int error_code, struct pt_regs *regs)
32152 if (error_code & PF_USER)
32153 return false;
32154
32155- if (!user_mode_vm(regs) && (regs->flags & X86_EFLAGS_AC))
32156+ if (!user_mode(regs) && (regs->flags & X86_EFLAGS_AC))
32157 return false;
32158
32159 return true;
32160@@ -1063,6 +1271,22 @@ __do_page_fault(struct pt_regs *regs, unsigned long error_code,
32161 tsk = current;
32162 mm = tsk->mm;
32163
32164+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
32165+ if (!user_mode(regs) && address < 2 * pax_user_shadow_base) {
32166+ if (!search_exception_tables(regs->ip)) {
32167+ printk(KERN_ERR "PAX: please report this to pageexec@freemail.hu\n");
32168+ bad_area_nosemaphore(regs, error_code, address);
32169+ return;
32170+ }
32171+ if (address < pax_user_shadow_base) {
32172+ printk(KERN_ERR "PAX: please report this to pageexec@freemail.hu\n");
32173+ printk(KERN_ERR "PAX: faulting IP: %pS\n", (void *)regs->ip);
32174+ show_trace_log_lvl(NULL, NULL, (void *)regs->sp, regs->bp, KERN_ERR);
32175+ } else
32176+ address -= pax_user_shadow_base;
32177+ }
32178+#endif
32179+
32180 /*
32181 * Detect and handle instructions that would cause a page fault for
32182 * both a tracked kernel page and a userspace page.
32183@@ -1140,7 +1364,7 @@ __do_page_fault(struct pt_regs *regs, unsigned long error_code,
32184 * User-mode registers count as a user access even for any
32185 * potential system fault or CPU buglet:
32186 */
32187- if (user_mode_vm(regs)) {
32188+ if (user_mode(regs)) {
32189 local_irq_enable();
32190 error_code |= PF_USER;
32191 flags |= FAULT_FLAG_USER;
32192@@ -1187,6 +1411,11 @@ retry:
32193 might_sleep();
32194 }
32195
32196+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
32197+ if (pax_handle_pageexec_fault(regs, mm, address, error_code))
32198+ return;
32199+#endif
32200+
32201 vma = find_vma(mm, address);
32202 if (unlikely(!vma)) {
32203 bad_area(regs, error_code, address);
32204@@ -1198,18 +1427,24 @@ retry:
32205 bad_area(regs, error_code, address);
32206 return;
32207 }
32208- if (error_code & PF_USER) {
32209- /*
32210- * Accessing the stack below %sp is always a bug.
32211- * The large cushion allows instructions like enter
32212- * and pusha to work. ("enter $65535, $31" pushes
32213- * 32 pointers and then decrements %sp by 65535.)
32214- */
32215- if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < regs->sp)) {
32216- bad_area(regs, error_code, address);
32217- return;
32218- }
32219+ /*
32220+ * Accessing the stack below %sp is always a bug.
32221+ * The large cushion allows instructions like enter
32222+ * and pusha to work. ("enter $65535, $31" pushes
32223+ * 32 pointers and then decrements %sp by 65535.)
32224+ */
32225+ if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < task_pt_regs(tsk)->sp)) {
32226+ bad_area(regs, error_code, address);
32227+ return;
32228 }
32229+
32230+#ifdef CONFIG_PAX_SEGMEXEC
32231+ if (unlikely((mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end - SEGMEXEC_TASK_SIZE - 1 < address - SEGMEXEC_TASK_SIZE - 1)) {
32232+ bad_area(regs, error_code, address);
32233+ return;
32234+ }
32235+#endif
32236+
32237 if (unlikely(expand_stack(vma, address))) {
32238 bad_area(regs, error_code, address);
32239 return;
32240@@ -1329,3 +1564,292 @@ trace_do_page_fault(struct pt_regs *regs, unsigned long error_code)
32241 }
32242 NOKPROBE_SYMBOL(trace_do_page_fault);
32243 #endif /* CONFIG_TRACING */
32244+
32245+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
32246+static bool pax_is_fetch_fault(struct pt_regs *regs, unsigned long error_code, unsigned long address)
32247+{
32248+ struct mm_struct *mm = current->mm;
32249+ unsigned long ip = regs->ip;
32250+
32251+ if (v8086_mode(regs))
32252+ ip = ((regs->cs & 0xffff) << 4) + (ip & 0xffff);
32253+
32254+#ifdef CONFIG_PAX_PAGEEXEC
32255+ if (mm->pax_flags & MF_PAX_PAGEEXEC) {
32256+ if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR))
32257+ return true;
32258+ if (!(error_code & (PF_PROT | PF_WRITE)) && ip == address)
32259+ return true;
32260+ return false;
32261+ }
32262+#endif
32263+
32264+#ifdef CONFIG_PAX_SEGMEXEC
32265+ if (mm->pax_flags & MF_PAX_SEGMEXEC) {
32266+ if (!(error_code & (PF_PROT | PF_WRITE)) && (ip + SEGMEXEC_TASK_SIZE == address))
32267+ return true;
32268+ return false;
32269+ }
32270+#endif
32271+
32272+ return false;
32273+}
32274+#endif
32275+
32276+#ifdef CONFIG_PAX_EMUTRAMP
32277+static int pax_handle_fetch_fault_32(struct pt_regs *regs)
32278+{
32279+ int err;
32280+
32281+ do { /* PaX: libffi trampoline emulation */
32282+ unsigned char mov, jmp;
32283+ unsigned int addr1, addr2;
32284+
32285+#ifdef CONFIG_X86_64
32286+ if ((regs->ip + 9) >> 32)
32287+ break;
32288+#endif
32289+
32290+ err = get_user(mov, (unsigned char __user *)regs->ip);
32291+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
32292+ err |= get_user(jmp, (unsigned char __user *)(regs->ip + 5));
32293+ err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
32294+
32295+ if (err)
32296+ break;
32297+
32298+ if (mov == 0xB8 && jmp == 0xE9) {
32299+ regs->ax = addr1;
32300+ regs->ip = (unsigned int)(regs->ip + addr2 + 10);
32301+ return 2;
32302+ }
32303+ } while (0);
32304+
32305+ do { /* PaX: gcc trampoline emulation #1 */
32306+ unsigned char mov1, mov2;
32307+ unsigned short jmp;
32308+ unsigned int addr1, addr2;
32309+
32310+#ifdef CONFIG_X86_64
32311+ if ((regs->ip + 11) >> 32)
32312+ break;
32313+#endif
32314+
32315+ err = get_user(mov1, (unsigned char __user *)regs->ip);
32316+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
32317+ err |= get_user(mov2, (unsigned char __user *)(regs->ip + 5));
32318+ err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
32319+ err |= get_user(jmp, (unsigned short __user *)(regs->ip + 10));
32320+
32321+ if (err)
32322+ break;
32323+
32324+ if (mov1 == 0xB9 && mov2 == 0xB8 && jmp == 0xE0FF) {
32325+ regs->cx = addr1;
32326+ regs->ax = addr2;
32327+ regs->ip = addr2;
32328+ return 2;
32329+ }
32330+ } while (0);
32331+
32332+ do { /* PaX: gcc trampoline emulation #2 */
32333+ unsigned char mov, jmp;
32334+ unsigned int addr1, addr2;
32335+
32336+#ifdef CONFIG_X86_64
32337+ if ((regs->ip + 9) >> 32)
32338+ break;
32339+#endif
32340+
32341+ err = get_user(mov, (unsigned char __user *)regs->ip);
32342+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
32343+ err |= get_user(jmp, (unsigned char __user *)(regs->ip + 5));
32344+ err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
32345+
32346+ if (err)
32347+ break;
32348+
32349+ if (mov == 0xB9 && jmp == 0xE9) {
32350+ regs->cx = addr1;
32351+ regs->ip = (unsigned int)(regs->ip + addr2 + 10);
32352+ return 2;
32353+ }
32354+ } while (0);
32355+
32356+ return 1; /* PaX in action */
32357+}
32358+
32359+#ifdef CONFIG_X86_64
32360+static int pax_handle_fetch_fault_64(struct pt_regs *regs)
32361+{
32362+ int err;
32363+
32364+ do { /* PaX: libffi trampoline emulation */
32365+ unsigned short mov1, mov2, jmp1;
32366+ unsigned char stcclc, jmp2;
32367+ unsigned long addr1, addr2;
32368+
32369+ err = get_user(mov1, (unsigned short __user *)regs->ip);
32370+ err |= get_user(addr1, (unsigned long __user *)(regs->ip + 2));
32371+ err |= get_user(mov2, (unsigned short __user *)(regs->ip + 10));
32372+ err |= get_user(addr2, (unsigned long __user *)(regs->ip + 12));
32373+ err |= get_user(stcclc, (unsigned char __user *)(regs->ip + 20));
32374+ err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 21));
32375+ err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 23));
32376+
32377+ if (err)
32378+ break;
32379+
32380+ if (mov1 == 0xBB49 && mov2 == 0xBA49 && (stcclc == 0xF8 || stcclc == 0xF9) && jmp1 == 0xFF49 && jmp2 == 0xE3) {
32381+ regs->r11 = addr1;
32382+ regs->r10 = addr2;
32383+ if (stcclc == 0xF8)
32384+ regs->flags &= ~X86_EFLAGS_CF;
32385+ else
32386+ regs->flags |= X86_EFLAGS_CF;
32387+ regs->ip = addr1;
32388+ return 2;
32389+ }
32390+ } while (0);
32391+
32392+ do { /* PaX: gcc trampoline emulation #1 */
32393+ unsigned short mov1, mov2, jmp1;
32394+ unsigned char jmp2;
32395+ unsigned int addr1;
32396+ unsigned long addr2;
32397+
32398+ err = get_user(mov1, (unsigned short __user *)regs->ip);
32399+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 2));
32400+ err |= get_user(mov2, (unsigned short __user *)(regs->ip + 6));
32401+ err |= get_user(addr2, (unsigned long __user *)(regs->ip + 8));
32402+ err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 16));
32403+ err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 18));
32404+
32405+ if (err)
32406+ break;
32407+
32408+ if (mov1 == 0xBB41 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
32409+ regs->r11 = addr1;
32410+ regs->r10 = addr2;
32411+ regs->ip = addr1;
32412+ return 2;
32413+ }
32414+ } while (0);
32415+
32416+ do { /* PaX: gcc trampoline emulation #2 */
32417+ unsigned short mov1, mov2, jmp1;
32418+ unsigned char jmp2;
32419+ unsigned long addr1, addr2;
32420+
32421+ err = get_user(mov1, (unsigned short __user *)regs->ip);
32422+ err |= get_user(addr1, (unsigned long __user *)(regs->ip + 2));
32423+ err |= get_user(mov2, (unsigned short __user *)(regs->ip + 10));
32424+ err |= get_user(addr2, (unsigned long __user *)(regs->ip + 12));
32425+ err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 20));
32426+ err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 22));
32427+
32428+ if (err)
32429+ break;
32430+
32431+ if (mov1 == 0xBB49 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
32432+ regs->r11 = addr1;
32433+ regs->r10 = addr2;
32434+ regs->ip = addr1;
32435+ return 2;
32436+ }
32437+ } while (0);
32438+
32439+ return 1; /* PaX in action */
32440+}
32441+#endif
32442+
32443+/*
32444+ * PaX: decide what to do with offenders (regs->ip = fault address)
32445+ *
32446+ * returns 1 when task should be killed
32447+ * 2 when gcc trampoline was detected
32448+ */
32449+static int pax_handle_fetch_fault(struct pt_regs *regs)
32450+{
32451+ if (v8086_mode(regs))
32452+ return 1;
32453+
32454+ if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
32455+ return 1;
32456+
32457+#ifdef CONFIG_X86_32
32458+ return pax_handle_fetch_fault_32(regs);
32459+#else
32460+ if (regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT))
32461+ return pax_handle_fetch_fault_32(regs);
32462+ else
32463+ return pax_handle_fetch_fault_64(regs);
32464+#endif
32465+}
32466+#endif
32467+
32468+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
32469+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
32470+{
32471+ long i;
32472+
32473+ printk(KERN_ERR "PAX: bytes at PC: ");
32474+ for (i = 0; i < 20; i++) {
32475+ unsigned char c;
32476+ if (get_user(c, (unsigned char __force_user *)pc+i))
32477+ printk(KERN_CONT "?? ");
32478+ else
32479+ printk(KERN_CONT "%02x ", c);
32480+ }
32481+ printk("\n");
32482+
32483+ printk(KERN_ERR "PAX: bytes at SP-%lu: ", (unsigned long)sizeof(long));
32484+ for (i = -1; i < 80 / (long)sizeof(long); i++) {
32485+ unsigned long c;
32486+ if (get_user(c, (unsigned long __force_user *)sp+i)) {
32487+#ifdef CONFIG_X86_32
32488+ printk(KERN_CONT "???????? ");
32489+#else
32490+ if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT)))
32491+ printk(KERN_CONT "???????? ???????? ");
32492+ else
32493+ printk(KERN_CONT "???????????????? ");
32494+#endif
32495+ } else {
32496+#ifdef CONFIG_X86_64
32497+ if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT))) {
32498+ printk(KERN_CONT "%08x ", (unsigned int)c);
32499+ printk(KERN_CONT "%08x ", (unsigned int)(c >> 32));
32500+ } else
32501+#endif
32502+ printk(KERN_CONT "%0*lx ", 2 * (int)sizeof(long), c);
32503+ }
32504+ }
32505+ printk("\n");
32506+}
32507+#endif
32508+
32509+/**
32510+ * probe_kernel_write(): safely attempt to write to a location
32511+ * @dst: address to write to
32512+ * @src: pointer to the data that shall be written
32513+ * @size: size of the data chunk
32514+ *
32515+ * Safely write to address @dst from the buffer at @src. If a kernel fault
32516+ * happens, handle that and return -EFAULT.
32517+ */
32518+long notrace probe_kernel_write(void *dst, const void *src, size_t size)
32519+{
32520+ long ret;
32521+ mm_segment_t old_fs = get_fs();
32522+
32523+ set_fs(KERNEL_DS);
32524+ pagefault_disable();
32525+ pax_open_kernel();
32526+ ret = __copy_to_user_inatomic((void __force_user *)dst, src, size);
32527+ pax_close_kernel();
32528+ pagefault_enable();
32529+ set_fs(old_fs);
32530+
32531+ return ret ? -EFAULT : 0;
32532+}
32533diff --git a/arch/x86/mm/gup.c b/arch/x86/mm/gup.c
32534index 81bf3d2..7ef25c2 100644
32535--- a/arch/x86/mm/gup.c
32536+++ b/arch/x86/mm/gup.c
32537@@ -268,7 +268,7 @@ int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
32538 addr = start;
32539 len = (unsigned long) nr_pages << PAGE_SHIFT;
32540 end = start + len;
32541- if (unlikely(!access_ok(write ? VERIFY_WRITE : VERIFY_READ,
32542+ if (unlikely(!access_ok_noprefault(write ? VERIFY_WRITE : VERIFY_READ,
32543 (void __user *)start, len)))
32544 return 0;
32545
32546@@ -344,6 +344,10 @@ int get_user_pages_fast(unsigned long start, int nr_pages, int write,
32547 goto slow_irqon;
32548 #endif
32549
32550+ if (unlikely(!access_ok_noprefault(write ? VERIFY_WRITE : VERIFY_READ,
32551+ (void __user *)start, len)))
32552+ return 0;
32553+
32554 /*
32555 * XXX: batch / limit 'nr', to avoid large irq off latency
32556 * needs some instrumenting to determine the common sizes used by
32557diff --git a/arch/x86/mm/highmem_32.c b/arch/x86/mm/highmem_32.c
32558index 4500142..53a363c 100644
32559--- a/arch/x86/mm/highmem_32.c
32560+++ b/arch/x86/mm/highmem_32.c
32561@@ -45,7 +45,11 @@ void *kmap_atomic_prot(struct page *page, pgprot_t prot)
32562 idx = type + KM_TYPE_NR*smp_processor_id();
32563 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
32564 BUG_ON(!pte_none(*(kmap_pte-idx)));
32565+
32566+ pax_open_kernel();
32567 set_pte(kmap_pte-idx, mk_pte(page, prot));
32568+ pax_close_kernel();
32569+
32570 arch_flush_lazy_mmu_mode();
32571
32572 return (void *)vaddr;
32573diff --git a/arch/x86/mm/hugetlbpage.c b/arch/x86/mm/hugetlbpage.c
32574index 42982b2..7168fc3 100644
32575--- a/arch/x86/mm/hugetlbpage.c
32576+++ b/arch/x86/mm/hugetlbpage.c
32577@@ -74,23 +74,24 @@ int pud_huge(pud_t pud)
32578 #ifdef CONFIG_HUGETLB_PAGE
32579 static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
32580 unsigned long addr, unsigned long len,
32581- unsigned long pgoff, unsigned long flags)
32582+ unsigned long pgoff, unsigned long flags, unsigned long offset)
32583 {
32584 struct hstate *h = hstate_file(file);
32585 struct vm_unmapped_area_info info;
32586-
32587+
32588 info.flags = 0;
32589 info.length = len;
32590 info.low_limit = current->mm->mmap_legacy_base;
32591 info.high_limit = TASK_SIZE;
32592 info.align_mask = PAGE_MASK & ~huge_page_mask(h);
32593 info.align_offset = 0;
32594+ info.threadstack_offset = offset;
32595 return vm_unmapped_area(&info);
32596 }
32597
32598 static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
32599 unsigned long addr0, unsigned long len,
32600- unsigned long pgoff, unsigned long flags)
32601+ unsigned long pgoff, unsigned long flags, unsigned long offset)
32602 {
32603 struct hstate *h = hstate_file(file);
32604 struct vm_unmapped_area_info info;
32605@@ -102,6 +103,7 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
32606 info.high_limit = current->mm->mmap_base;
32607 info.align_mask = PAGE_MASK & ~huge_page_mask(h);
32608 info.align_offset = 0;
32609+ info.threadstack_offset = offset;
32610 addr = vm_unmapped_area(&info);
32611
32612 /*
32613@@ -114,6 +116,12 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
32614 VM_BUG_ON(addr != -ENOMEM);
32615 info.flags = 0;
32616 info.low_limit = TASK_UNMAPPED_BASE;
32617+
32618+#ifdef CONFIG_PAX_RANDMMAP
32619+ if (current->mm->pax_flags & MF_PAX_RANDMMAP)
32620+ info.low_limit += current->mm->delta_mmap;
32621+#endif
32622+
32623 info.high_limit = TASK_SIZE;
32624 addr = vm_unmapped_area(&info);
32625 }
32626@@ -128,10 +136,20 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
32627 struct hstate *h = hstate_file(file);
32628 struct mm_struct *mm = current->mm;
32629 struct vm_area_struct *vma;
32630+ unsigned long pax_task_size = TASK_SIZE;
32631+ unsigned long offset = gr_rand_threadstack_offset(mm, file, flags);
32632
32633 if (len & ~huge_page_mask(h))
32634 return -EINVAL;
32635- if (len > TASK_SIZE)
32636+
32637+#ifdef CONFIG_PAX_SEGMEXEC
32638+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
32639+ pax_task_size = SEGMEXEC_TASK_SIZE;
32640+#endif
32641+
32642+ pax_task_size -= PAGE_SIZE;
32643+
32644+ if (len > pax_task_size)
32645 return -ENOMEM;
32646
32647 if (flags & MAP_FIXED) {
32648@@ -140,19 +158,22 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
32649 return addr;
32650 }
32651
32652+#ifdef CONFIG_PAX_RANDMMAP
32653+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
32654+#endif
32655+
32656 if (addr) {
32657 addr = ALIGN(addr, huge_page_size(h));
32658 vma = find_vma(mm, addr);
32659- if (TASK_SIZE - len >= addr &&
32660- (!vma || addr + len <= vma->vm_start))
32661+ if (pax_task_size - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
32662 return addr;
32663 }
32664 if (mm->get_unmapped_area == arch_get_unmapped_area)
32665 return hugetlb_get_unmapped_area_bottomup(file, addr, len,
32666- pgoff, flags);
32667+ pgoff, flags, offset);
32668 else
32669 return hugetlb_get_unmapped_area_topdown(file, addr, len,
32670- pgoff, flags);
32671+ pgoff, flags, offset);
32672 }
32673 #endif /* CONFIG_HUGETLB_PAGE */
32674
32675diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
32676index a110efc..a31a18f 100644
32677--- a/arch/x86/mm/init.c
32678+++ b/arch/x86/mm/init.c
32679@@ -4,6 +4,7 @@
32680 #include <linux/swap.h>
32681 #include <linux/memblock.h>
32682 #include <linux/bootmem.h> /* for max_low_pfn */
32683+#include <linux/tboot.h>
32684
32685 #include <asm/cacheflush.h>
32686 #include <asm/e820.h>
32687@@ -17,6 +18,8 @@
32688 #include <asm/proto.h>
32689 #include <asm/dma.h> /* for MAX_DMA_PFN */
32690 #include <asm/microcode.h>
32691+#include <asm/desc.h>
32692+#include <asm/bios_ebda.h>
32693
32694 /*
32695 * We need to define the tracepoints somewhere, and tlb.c
32696@@ -620,7 +623,18 @@ void __init init_mem_mapping(void)
32697 early_ioremap_page_table_range_init();
32698 #endif
32699
32700+#ifdef CONFIG_PAX_PER_CPU_PGD
32701+ clone_pgd_range(get_cpu_pgd(0, kernel) + KERNEL_PGD_BOUNDARY,
32702+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
32703+ KERNEL_PGD_PTRS);
32704+ clone_pgd_range(get_cpu_pgd(0, user) + KERNEL_PGD_BOUNDARY,
32705+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
32706+ KERNEL_PGD_PTRS);
32707+ load_cr3(get_cpu_pgd(0, kernel));
32708+#else
32709 load_cr3(swapper_pg_dir);
32710+#endif
32711+
32712 __flush_tlb_all();
32713
32714 early_memtest(0, max_pfn_mapped << PAGE_SHIFT);
32715@@ -636,10 +650,40 @@ void __init init_mem_mapping(void)
32716 * Access has to be given to non-kernel-ram areas as well, these contain the PCI
32717 * mmio resources as well as potential bios/acpi data regions.
32718 */
32719+
32720+#ifdef CONFIG_GRKERNSEC_KMEM
32721+static unsigned int ebda_start __read_only;
32722+static unsigned int ebda_end __read_only;
32723+#endif
32724+
32725 int devmem_is_allowed(unsigned long pagenr)
32726 {
32727- if (pagenr < 256)
32728+#ifdef CONFIG_GRKERNSEC_KMEM
32729+ /* allow BDA */
32730+ if (!pagenr)
32731 return 1;
32732+ /* allow EBDA */
32733+ if (pagenr >= ebda_start && pagenr < ebda_end)
32734+ return 1;
32735+ /* if tboot is in use, allow access to its hardcoded serial log range */
32736+ if (tboot_enabled() && ((0x60000 >> PAGE_SHIFT) <= pagenr) && (pagenr < (0x68000 >> PAGE_SHIFT)))
32737+ return 1;
32738+#else
32739+ if (!pagenr)
32740+ return 1;
32741+#ifdef CONFIG_VM86
32742+ if (pagenr < (ISA_START_ADDRESS >> PAGE_SHIFT))
32743+ return 1;
32744+#endif
32745+#endif
32746+
32747+ if ((ISA_START_ADDRESS >> PAGE_SHIFT) <= pagenr && pagenr < (ISA_END_ADDRESS >> PAGE_SHIFT))
32748+ return 1;
32749+#ifdef CONFIG_GRKERNSEC_KMEM
32750+ /* throw out everything else below 1MB */
32751+ if (pagenr <= 256)
32752+ return 0;
32753+#endif
32754 if (iomem_is_exclusive(pagenr << PAGE_SHIFT))
32755 return 0;
32756 if (!page_is_ram(pagenr))
32757@@ -685,8 +729,117 @@ void free_init_pages(char *what, unsigned long begin, unsigned long end)
32758 #endif
32759 }
32760
32761+#ifdef CONFIG_GRKERNSEC_KMEM
32762+static inline void gr_init_ebda(void)
32763+{
32764+ unsigned int ebda_addr;
32765+ unsigned int ebda_size = 0;
32766+
32767+ ebda_addr = get_bios_ebda();
32768+ if (ebda_addr) {
32769+ ebda_size = *(unsigned char *)phys_to_virt(ebda_addr);
32770+ ebda_size <<= 10;
32771+ }
32772+ if (ebda_addr && ebda_size) {
32773+ ebda_start = ebda_addr >> PAGE_SHIFT;
32774+ ebda_end = min((unsigned int)PAGE_ALIGN(ebda_addr + ebda_size), (unsigned int)0xa0000) >> PAGE_SHIFT;
32775+ } else {
32776+ ebda_start = 0x9f000 >> PAGE_SHIFT;
32777+ ebda_end = 0xa0000 >> PAGE_SHIFT;
32778+ }
32779+}
32780+#else
32781+static inline void gr_init_ebda(void) { }
32782+#endif
32783+
32784 void free_initmem(void)
32785 {
32786+#ifdef CONFIG_PAX_KERNEXEC
32787+#ifdef CONFIG_X86_32
32788+ /* PaX: limit KERNEL_CS to actual size */
32789+ unsigned long addr, limit;
32790+ struct desc_struct d;
32791+ int cpu;
32792+#else
32793+ pgd_t *pgd;
32794+ pud_t *pud;
32795+ pmd_t *pmd;
32796+ unsigned long addr, end;
32797+#endif
32798+#endif
32799+
32800+ gr_init_ebda();
32801+
32802+#ifdef CONFIG_PAX_KERNEXEC
32803+#ifdef CONFIG_X86_32
32804+ limit = paravirt_enabled() ? ktva_ktla(0xffffffff) : (unsigned long)&_etext;
32805+ limit = (limit - 1UL) >> PAGE_SHIFT;
32806+
32807+ memset(__LOAD_PHYSICAL_ADDR + PAGE_OFFSET, POISON_FREE_INITMEM, PAGE_SIZE);
32808+ for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
32809+ pack_descriptor(&d, get_desc_base(&get_cpu_gdt_table(cpu)[GDT_ENTRY_KERNEL_CS]), limit, 0x9B, 0xC);
32810+ write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_KERNEL_CS, &d, DESCTYPE_S);
32811+ write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_KERNEXEC_KERNEL_CS, &d, DESCTYPE_S);
32812+ }
32813+
32814+ /* PaX: make KERNEL_CS read-only */
32815+ addr = PFN_ALIGN(ktla_ktva((unsigned long)&_text));
32816+ if (!paravirt_enabled())
32817+ set_memory_ro(addr, (PFN_ALIGN(_sdata) - addr) >> PAGE_SHIFT);
32818+/*
32819+ for (addr = ktla_ktva((unsigned long)&_text); addr < (unsigned long)&_sdata; addr += PMD_SIZE) {
32820+ pgd = pgd_offset_k(addr);
32821+ pud = pud_offset(pgd, addr);
32822+ pmd = pmd_offset(pud, addr);
32823+ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
32824+ }
32825+*/
32826+#ifdef CONFIG_X86_PAE
32827+ set_memory_nx(PFN_ALIGN(__init_begin), (PFN_ALIGN(__init_end) - PFN_ALIGN(__init_begin)) >> PAGE_SHIFT);
32828+/*
32829+ for (addr = (unsigned long)&__init_begin; addr < (unsigned long)&__init_end; addr += PMD_SIZE) {
32830+ pgd = pgd_offset_k(addr);
32831+ pud = pud_offset(pgd, addr);
32832+ pmd = pmd_offset(pud, addr);
32833+ set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
32834+ }
32835+*/
32836+#endif
32837+
32838+#ifdef CONFIG_MODULES
32839+ set_memory_4k((unsigned long)MODULES_EXEC_VADDR, (MODULES_EXEC_END - MODULES_EXEC_VADDR) >> PAGE_SHIFT);
32840+#endif
32841+
32842+#else
32843+ /* PaX: make kernel code/rodata read-only, rest non-executable */
32844+ for (addr = __START_KERNEL_map; addr < __START_KERNEL_map + KERNEL_IMAGE_SIZE; addr += PMD_SIZE) {
32845+ pgd = pgd_offset_k(addr);
32846+ pud = pud_offset(pgd, addr);
32847+ pmd = pmd_offset(pud, addr);
32848+ if (!pmd_present(*pmd))
32849+ continue;
32850+ if ((unsigned long)_text <= addr && addr < (unsigned long)_sdata)
32851+ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
32852+ else
32853+ set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
32854+ }
32855+
32856+ addr = (unsigned long)__va(__pa(__START_KERNEL_map));
32857+ end = addr + KERNEL_IMAGE_SIZE;
32858+ for (; addr < end; addr += PMD_SIZE) {
32859+ pgd = pgd_offset_k(addr);
32860+ pud = pud_offset(pgd, addr);
32861+ pmd = pmd_offset(pud, addr);
32862+ if (!pmd_present(*pmd))
32863+ continue;
32864+ if ((unsigned long)__va(__pa(_text)) <= addr && addr < (unsigned long)__va(__pa(_sdata)))
32865+ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
32866+ }
32867+#endif
32868+
32869+ flush_tlb_all();
32870+#endif
32871+
32872 free_init_pages("unused kernel",
32873 (unsigned long)(&__init_begin),
32874 (unsigned long)(&__init_end));
32875diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
32876index c8140e1..59257fc 100644
32877--- a/arch/x86/mm/init_32.c
32878+++ b/arch/x86/mm/init_32.c
32879@@ -62,33 +62,6 @@ static noinline int do_test_wp_bit(void);
32880 bool __read_mostly __vmalloc_start_set = false;
32881
32882 /*
32883- * Creates a middle page table and puts a pointer to it in the
32884- * given global directory entry. This only returns the gd entry
32885- * in non-PAE compilation mode, since the middle layer is folded.
32886- */
32887-static pmd_t * __init one_md_table_init(pgd_t *pgd)
32888-{
32889- pud_t *pud;
32890- pmd_t *pmd_table;
32891-
32892-#ifdef CONFIG_X86_PAE
32893- if (!(pgd_val(*pgd) & _PAGE_PRESENT)) {
32894- pmd_table = (pmd_t *)alloc_low_page();
32895- paravirt_alloc_pmd(&init_mm, __pa(pmd_table) >> PAGE_SHIFT);
32896- set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
32897- pud = pud_offset(pgd, 0);
32898- BUG_ON(pmd_table != pmd_offset(pud, 0));
32899-
32900- return pmd_table;
32901- }
32902-#endif
32903- pud = pud_offset(pgd, 0);
32904- pmd_table = pmd_offset(pud, 0);
32905-
32906- return pmd_table;
32907-}
32908-
32909-/*
32910 * Create a page table and place a pointer to it in a middle page
32911 * directory entry:
32912 */
32913@@ -98,13 +71,28 @@ static pte_t * __init one_page_table_init(pmd_t *pmd)
32914 pte_t *page_table = (pte_t *)alloc_low_page();
32915
32916 paravirt_alloc_pte(&init_mm, __pa(page_table) >> PAGE_SHIFT);
32917+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
32918+ set_pmd(pmd, __pmd(__pa(page_table) | _KERNPG_TABLE));
32919+#else
32920 set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));
32921+#endif
32922 BUG_ON(page_table != pte_offset_kernel(pmd, 0));
32923 }
32924
32925 return pte_offset_kernel(pmd, 0);
32926 }
32927
32928+static pmd_t * __init one_md_table_init(pgd_t *pgd)
32929+{
32930+ pud_t *pud;
32931+ pmd_t *pmd_table;
32932+
32933+ pud = pud_offset(pgd, 0);
32934+ pmd_table = pmd_offset(pud, 0);
32935+
32936+ return pmd_table;
32937+}
32938+
32939 pmd_t * __init populate_extra_pmd(unsigned long vaddr)
32940 {
32941 int pgd_idx = pgd_index(vaddr);
32942@@ -208,6 +196,7 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
32943 int pgd_idx, pmd_idx;
32944 unsigned long vaddr;
32945 pgd_t *pgd;
32946+ pud_t *pud;
32947 pmd_t *pmd;
32948 pte_t *pte = NULL;
32949 unsigned long count = page_table_range_init_count(start, end);
32950@@ -222,8 +211,13 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
32951 pgd = pgd_base + pgd_idx;
32952
32953 for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd++, pgd_idx++) {
32954- pmd = one_md_table_init(pgd);
32955- pmd = pmd + pmd_index(vaddr);
32956+ pud = pud_offset(pgd, vaddr);
32957+ pmd = pmd_offset(pud, vaddr);
32958+
32959+#ifdef CONFIG_X86_PAE
32960+ paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
32961+#endif
32962+
32963 for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end);
32964 pmd++, pmd_idx++) {
32965 pte = page_table_kmap_check(one_page_table_init(pmd),
32966@@ -235,11 +229,20 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
32967 }
32968 }
32969
32970-static inline int is_kernel_text(unsigned long addr)
32971+static inline int is_kernel_text(unsigned long start, unsigned long end)
32972 {
32973- if (addr >= (unsigned long)_text && addr <= (unsigned long)__init_end)
32974- return 1;
32975- return 0;
32976+ if ((start >= ktla_ktva((unsigned long)_etext) ||
32977+ end <= ktla_ktva((unsigned long)_stext)) &&
32978+ (start >= ktla_ktva((unsigned long)_einittext) ||
32979+ end <= ktla_ktva((unsigned long)_sinittext)) &&
32980+
32981+#ifdef CONFIG_ACPI_SLEEP
32982+ (start >= (unsigned long)__va(acpi_wakeup_address) + 0x4000 || end <= (unsigned long)__va(acpi_wakeup_address)) &&
32983+#endif
32984+
32985+ (start > (unsigned long)__va(0xfffff) || end <= (unsigned long)__va(0xc0000)))
32986+ return 0;
32987+ return 1;
32988 }
32989
32990 /*
32991@@ -256,9 +259,10 @@ kernel_physical_mapping_init(unsigned long start,
32992 unsigned long last_map_addr = end;
32993 unsigned long start_pfn, end_pfn;
32994 pgd_t *pgd_base = swapper_pg_dir;
32995- int pgd_idx, pmd_idx, pte_ofs;
32996+ unsigned int pgd_idx, pmd_idx, pte_ofs;
32997 unsigned long pfn;
32998 pgd_t *pgd;
32999+ pud_t *pud;
33000 pmd_t *pmd;
33001 pte_t *pte;
33002 unsigned pages_2m, pages_4k;
33003@@ -291,8 +295,13 @@ repeat:
33004 pfn = start_pfn;
33005 pgd_idx = pgd_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
33006 pgd = pgd_base + pgd_idx;
33007- for (; pgd_idx < PTRS_PER_PGD; pgd++, pgd_idx++) {
33008- pmd = one_md_table_init(pgd);
33009+ for (; pgd_idx < PTRS_PER_PGD && pfn < max_low_pfn; pgd++, pgd_idx++) {
33010+ pud = pud_offset(pgd, 0);
33011+ pmd = pmd_offset(pud, 0);
33012+
33013+#ifdef CONFIG_X86_PAE
33014+ paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
33015+#endif
33016
33017 if (pfn >= end_pfn)
33018 continue;
33019@@ -304,14 +313,13 @@ repeat:
33020 #endif
33021 for (; pmd_idx < PTRS_PER_PMD && pfn < end_pfn;
33022 pmd++, pmd_idx++) {
33023- unsigned int addr = pfn * PAGE_SIZE + PAGE_OFFSET;
33024+ unsigned long address = pfn * PAGE_SIZE + PAGE_OFFSET;
33025
33026 /*
33027 * Map with big pages if possible, otherwise
33028 * create normal page tables:
33029 */
33030 if (use_pse) {
33031- unsigned int addr2;
33032 pgprot_t prot = PAGE_KERNEL_LARGE;
33033 /*
33034 * first pass will use the same initial
33035@@ -322,11 +330,7 @@ repeat:
33036 _PAGE_PSE);
33037
33038 pfn &= PMD_MASK >> PAGE_SHIFT;
33039- addr2 = (pfn + PTRS_PER_PTE-1) * PAGE_SIZE +
33040- PAGE_OFFSET + PAGE_SIZE-1;
33041-
33042- if (is_kernel_text(addr) ||
33043- is_kernel_text(addr2))
33044+ if (is_kernel_text(address, address + PMD_SIZE))
33045 prot = PAGE_KERNEL_LARGE_EXEC;
33046
33047 pages_2m++;
33048@@ -343,7 +347,7 @@ repeat:
33049 pte_ofs = pte_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
33050 pte += pte_ofs;
33051 for (; pte_ofs < PTRS_PER_PTE && pfn < end_pfn;
33052- pte++, pfn++, pte_ofs++, addr += PAGE_SIZE) {
33053+ pte++, pfn++, pte_ofs++, address += PAGE_SIZE) {
33054 pgprot_t prot = PAGE_KERNEL;
33055 /*
33056 * first pass will use the same initial
33057@@ -351,7 +355,7 @@ repeat:
33058 */
33059 pgprot_t init_prot = __pgprot(PTE_IDENT_ATTR);
33060
33061- if (is_kernel_text(addr))
33062+ if (is_kernel_text(address, address + PAGE_SIZE))
33063 prot = PAGE_KERNEL_EXEC;
33064
33065 pages_4k++;
33066@@ -474,7 +478,7 @@ void __init native_pagetable_init(void)
33067
33068 pud = pud_offset(pgd, va);
33069 pmd = pmd_offset(pud, va);
33070- if (!pmd_present(*pmd))
33071+ if (!pmd_present(*pmd)) // PAX TODO || pmd_large(*pmd))
33072 break;
33073
33074 /* should not be large page here */
33075@@ -532,12 +536,10 @@ void __init early_ioremap_page_table_range_init(void)
33076
33077 static void __init pagetable_init(void)
33078 {
33079- pgd_t *pgd_base = swapper_pg_dir;
33080-
33081- permanent_kmaps_init(pgd_base);
33082+ permanent_kmaps_init(swapper_pg_dir);
33083 }
33084
33085-pteval_t __supported_pte_mask __read_mostly = ~(_PAGE_NX | _PAGE_GLOBAL);
33086+pteval_t __supported_pte_mask __read_only = ~(_PAGE_NX | _PAGE_GLOBAL);
33087 EXPORT_SYMBOL_GPL(__supported_pte_mask);
33088
33089 /* user-defined highmem size */
33090@@ -787,10 +789,10 @@ void __init mem_init(void)
33091 ((unsigned long)&__init_end -
33092 (unsigned long)&__init_begin) >> 10,
33093
33094- (unsigned long)&_etext, (unsigned long)&_edata,
33095- ((unsigned long)&_edata - (unsigned long)&_etext) >> 10,
33096+ (unsigned long)&_sdata, (unsigned long)&_edata,
33097+ ((unsigned long)&_edata - (unsigned long)&_sdata) >> 10,
33098
33099- (unsigned long)&_text, (unsigned long)&_etext,
33100+ ktla_ktva((unsigned long)&_text), ktla_ktva((unsigned long)&_etext),
33101 ((unsigned long)&_etext - (unsigned long)&_text) >> 10);
33102
33103 /*
33104@@ -884,6 +886,7 @@ void set_kernel_text_rw(void)
33105 if (!kernel_set_to_readonly)
33106 return;
33107
33108+ start = ktla_ktva(start);
33109 pr_debug("Set kernel text: %lx - %lx for read write\n",
33110 start, start+size);
33111
33112@@ -898,6 +901,7 @@ void set_kernel_text_ro(void)
33113 if (!kernel_set_to_readonly)
33114 return;
33115
33116+ start = ktla_ktva(start);
33117 pr_debug("Set kernel text: %lx - %lx for read only\n",
33118 start, start+size);
33119
33120@@ -926,6 +930,7 @@ void mark_rodata_ro(void)
33121 unsigned long start = PFN_ALIGN(_text);
33122 unsigned long size = PFN_ALIGN(_etext) - start;
33123
33124+ start = ktla_ktva(start);
33125 set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
33126 printk(KERN_INFO "Write protecting the kernel text: %luk\n",
33127 size >> 10);
33128diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
33129index 30eb05a..ae671ac 100644
33130--- a/arch/x86/mm/init_64.c
33131+++ b/arch/x86/mm/init_64.c
33132@@ -150,7 +150,7 @@ early_param("gbpages", parse_direct_gbpages_on);
33133 * around without checking the pgd every time.
33134 */
33135
33136-pteval_t __supported_pte_mask __read_mostly = ~0;
33137+pteval_t __supported_pte_mask __read_only = ~_PAGE_NX;
33138 EXPORT_SYMBOL_GPL(__supported_pte_mask);
33139
33140 int force_personality32;
33141@@ -183,7 +183,12 @@ void sync_global_pgds(unsigned long start, unsigned long end, int removed)
33142
33143 for (address = start; address <= end; address += PGDIR_SIZE) {
33144 const pgd_t *pgd_ref = pgd_offset_k(address);
33145+
33146+#ifdef CONFIG_PAX_PER_CPU_PGD
33147+ unsigned long cpu;
33148+#else
33149 struct page *page;
33150+#endif
33151
33152 /*
33153 * When it is called after memory hot remove, pgd_none()
33154@@ -194,6 +199,25 @@ void sync_global_pgds(unsigned long start, unsigned long end, int removed)
33155 continue;
33156
33157 spin_lock(&pgd_lock);
33158+
33159+#ifdef CONFIG_PAX_PER_CPU_PGD
33160+ for (cpu = 0; cpu < nr_cpu_ids; ++cpu) {
33161+ pgd_t *pgd = pgd_offset_cpu(cpu, user, address);
33162+
33163+ if (!pgd_none(*pgd_ref) && !pgd_none(*pgd))
33164+ BUG_ON(pgd_page_vaddr(*pgd)
33165+ != pgd_page_vaddr(*pgd_ref));
33166+
33167+ if (removed) {
33168+ if (pgd_none(*pgd_ref) && !pgd_none(*pgd))
33169+ pgd_clear(pgd);
33170+ } else {
33171+ if (pgd_none(*pgd))
33172+ set_pgd(pgd, *pgd_ref);
33173+ }
33174+
33175+ pgd = pgd_offset_cpu(cpu, kernel, address);
33176+#else
33177 list_for_each_entry(page, &pgd_list, lru) {
33178 pgd_t *pgd;
33179 spinlock_t *pgt_lock;
33180@@ -202,6 +226,7 @@ void sync_global_pgds(unsigned long start, unsigned long end, int removed)
33181 /* the pgt_lock only for Xen */
33182 pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
33183 spin_lock(pgt_lock);
33184+#endif
33185
33186 if (!pgd_none(*pgd_ref) && !pgd_none(*pgd))
33187 BUG_ON(pgd_page_vaddr(*pgd)
33188@@ -215,7 +240,10 @@ void sync_global_pgds(unsigned long start, unsigned long end, int removed)
33189 set_pgd(pgd, *pgd_ref);
33190 }
33191
33192+#ifndef CONFIG_PAX_PER_CPU_PGD
33193 spin_unlock(pgt_lock);
33194+#endif
33195+
33196 }
33197 spin_unlock(&pgd_lock);
33198 }
33199@@ -248,7 +276,7 @@ static pud_t *fill_pud(pgd_t *pgd, unsigned long vaddr)
33200 {
33201 if (pgd_none(*pgd)) {
33202 pud_t *pud = (pud_t *)spp_getpage();
33203- pgd_populate(&init_mm, pgd, pud);
33204+ pgd_populate_kernel(&init_mm, pgd, pud);
33205 if (pud != pud_offset(pgd, 0))
33206 printk(KERN_ERR "PAGETABLE BUG #00! %p <-> %p\n",
33207 pud, pud_offset(pgd, 0));
33208@@ -260,7 +288,7 @@ static pmd_t *fill_pmd(pud_t *pud, unsigned long vaddr)
33209 {
33210 if (pud_none(*pud)) {
33211 pmd_t *pmd = (pmd_t *) spp_getpage();
33212- pud_populate(&init_mm, pud, pmd);
33213+ pud_populate_kernel(&init_mm, pud, pmd);
33214 if (pmd != pmd_offset(pud, 0))
33215 printk(KERN_ERR "PAGETABLE BUG #01! %p <-> %p\n",
33216 pmd, pmd_offset(pud, 0));
33217@@ -289,7 +317,9 @@ void set_pte_vaddr_pud(pud_t *pud_page, unsigned long vaddr, pte_t new_pte)
33218 pmd = fill_pmd(pud, vaddr);
33219 pte = fill_pte(pmd, vaddr);
33220
33221+ pax_open_kernel();
33222 set_pte(pte, new_pte);
33223+ pax_close_kernel();
33224
33225 /*
33226 * It's enough to flush this one mapping.
33227@@ -351,14 +381,12 @@ static void __init __init_extra_mapping(unsigned long phys, unsigned long size,
33228 pgd = pgd_offset_k((unsigned long)__va(phys));
33229 if (pgd_none(*pgd)) {
33230 pud = (pud_t *) spp_getpage();
33231- set_pgd(pgd, __pgd(__pa(pud) | _KERNPG_TABLE |
33232- _PAGE_USER));
33233+ set_pgd(pgd, __pgd(__pa(pud) | _PAGE_TABLE));
33234 }
33235 pud = pud_offset(pgd, (unsigned long)__va(phys));
33236 if (pud_none(*pud)) {
33237 pmd = (pmd_t *) spp_getpage();
33238- set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE |
33239- _PAGE_USER));
33240+ set_pud(pud, __pud(__pa(pmd) | _PAGE_TABLE));
33241 }
33242 pmd = pmd_offset(pud, phys);
33243 BUG_ON(!pmd_none(*pmd));
33244@@ -599,7 +627,7 @@ phys_pud_init(pud_t *pud_page, unsigned long addr, unsigned long end,
33245 prot);
33246
33247 spin_lock(&init_mm.page_table_lock);
33248- pud_populate(&init_mm, pud, pmd);
33249+ pud_populate_kernel(&init_mm, pud, pmd);
33250 spin_unlock(&init_mm.page_table_lock);
33251 }
33252 __flush_tlb_all();
33253@@ -640,7 +668,7 @@ kernel_physical_mapping_init(unsigned long start,
33254 page_size_mask);
33255
33256 spin_lock(&init_mm.page_table_lock);
33257- pgd_populate(&init_mm, pgd, pud);
33258+ pgd_populate_kernel(&init_mm, pgd, pud);
33259 spin_unlock(&init_mm.page_table_lock);
33260 pgd_changed = true;
33261 }
33262diff --git a/arch/x86/mm/iomap_32.c b/arch/x86/mm/iomap_32.c
33263index 9ca35fc..4b2b7b7 100644
33264--- a/arch/x86/mm/iomap_32.c
33265+++ b/arch/x86/mm/iomap_32.c
33266@@ -64,7 +64,11 @@ void *kmap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot)
33267 type = kmap_atomic_idx_push();
33268 idx = type + KM_TYPE_NR * smp_processor_id();
33269 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
33270+
33271+ pax_open_kernel();
33272 set_pte(kmap_pte - idx, pfn_pte(pfn, prot));
33273+ pax_close_kernel();
33274+
33275 arch_flush_lazy_mmu_mode();
33276
33277 return (void *)vaddr;
33278diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
33279index fdf617c..b9e85bc 100644
33280--- a/arch/x86/mm/ioremap.c
33281+++ b/arch/x86/mm/ioremap.c
33282@@ -56,8 +56,8 @@ static int __ioremap_check_ram(unsigned long start_pfn, unsigned long nr_pages,
33283 unsigned long i;
33284
33285 for (i = 0; i < nr_pages; ++i)
33286- if (pfn_valid(start_pfn + i) &&
33287- !PageReserved(pfn_to_page(start_pfn + i)))
33288+ if (pfn_valid(start_pfn + i) && (start_pfn + i >= 0x100 ||
33289+ !PageReserved(pfn_to_page(start_pfn + i))))
33290 return 1;
33291
33292 WARN_ONCE(1, "ioremap on RAM pfn 0x%lx\n", start_pfn);
33293@@ -283,7 +283,7 @@ EXPORT_SYMBOL(ioremap_prot);
33294 *
33295 * Caller must ensure there is only one unmapping for the same pointer.
33296 */
33297-void iounmap(volatile void __iomem *addr)
33298+void iounmap(const volatile void __iomem *addr)
33299 {
33300 struct vm_struct *p, *o;
33301
33302@@ -332,30 +332,29 @@ EXPORT_SYMBOL(iounmap);
33303 */
33304 void *xlate_dev_mem_ptr(phys_addr_t phys)
33305 {
33306- void *addr;
33307- unsigned long start = phys & PAGE_MASK;
33308-
33309 /* If page is RAM, we can use __va. Otherwise ioremap and unmap. */
33310- if (page_is_ram(start >> PAGE_SHIFT))
33311+ if (page_is_ram(phys >> PAGE_SHIFT))
33312+#ifdef CONFIG_HIGHMEM
33313+ if ((phys >> PAGE_SHIFT) < max_low_pfn)
33314+#endif
33315 return __va(phys);
33316
33317- addr = (void __force *)ioremap_cache(start, PAGE_SIZE);
33318- if (addr)
33319- addr = (void *)((unsigned long)addr | (phys & ~PAGE_MASK));
33320-
33321- return addr;
33322+ return (void __force *)ioremap_cache(phys, PAGE_SIZE);
33323 }
33324
33325 void unxlate_dev_mem_ptr(phys_addr_t phys, void *addr)
33326 {
33327 if (page_is_ram(phys >> PAGE_SHIFT))
33328+#ifdef CONFIG_HIGHMEM
33329+ if ((phys >> PAGE_SHIFT) < max_low_pfn)
33330+#endif
33331 return;
33332
33333 iounmap((void __iomem *)((unsigned long)addr & PAGE_MASK));
33334 return;
33335 }
33336
33337-static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __page_aligned_bss;
33338+static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __read_only __aligned(PAGE_SIZE);
33339
33340 static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
33341 {
33342@@ -391,8 +390,7 @@ void __init early_ioremap_init(void)
33343 early_ioremap_setup();
33344
33345 pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
33346- memset(bm_pte, 0, sizeof(bm_pte));
33347- pmd_populate_kernel(&init_mm, pmd, bm_pte);
33348+ pmd_populate_user(&init_mm, pmd, bm_pte);
33349
33350 /*
33351 * The boot-ioremap range spans multiple pmds, for which
33352diff --git a/arch/x86/mm/kmemcheck/kmemcheck.c b/arch/x86/mm/kmemcheck/kmemcheck.c
33353index b4f2e7e..96c9c3e 100644
33354--- a/arch/x86/mm/kmemcheck/kmemcheck.c
33355+++ b/arch/x86/mm/kmemcheck/kmemcheck.c
33356@@ -628,9 +628,9 @@ bool kmemcheck_fault(struct pt_regs *regs, unsigned long address,
33357 * memory (e.g. tracked pages)? For now, we need this to avoid
33358 * invoking kmemcheck for PnP BIOS calls.
33359 */
33360- if (regs->flags & X86_VM_MASK)
33361+ if (v8086_mode(regs))
33362 return false;
33363- if (regs->cs != __KERNEL_CS)
33364+ if (regs->cs != __KERNEL_CS && regs->cs != __KERNEXEC_KERNEL_CS)
33365 return false;
33366
33367 pte = kmemcheck_pte_lookup(address);
33368diff --git a/arch/x86/mm/mmap.c b/arch/x86/mm/mmap.c
33369index df4552b..12c129c 100644
33370--- a/arch/x86/mm/mmap.c
33371+++ b/arch/x86/mm/mmap.c
33372@@ -52,7 +52,7 @@ static unsigned long stack_maxrandom_size(void)
33373 * Leave an at least ~128 MB hole with possible stack randomization.
33374 */
33375 #define MIN_GAP (128*1024*1024UL + stack_maxrandom_size())
33376-#define MAX_GAP (TASK_SIZE/6*5)
33377+#define MAX_GAP (pax_task_size/6*5)
33378
33379 static int mmap_is_legacy(void)
33380 {
33381@@ -82,27 +82,40 @@ static unsigned long mmap_rnd(void)
33382 return rnd << PAGE_SHIFT;
33383 }
33384
33385-static unsigned long mmap_base(void)
33386+static unsigned long mmap_base(struct mm_struct *mm)
33387 {
33388 unsigned long gap = rlimit(RLIMIT_STACK);
33389+ unsigned long pax_task_size = TASK_SIZE;
33390+
33391+#ifdef CONFIG_PAX_SEGMEXEC
33392+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
33393+ pax_task_size = SEGMEXEC_TASK_SIZE;
33394+#endif
33395
33396 if (gap < MIN_GAP)
33397 gap = MIN_GAP;
33398 else if (gap > MAX_GAP)
33399 gap = MAX_GAP;
33400
33401- return PAGE_ALIGN(TASK_SIZE - gap - mmap_rnd());
33402+ return PAGE_ALIGN(pax_task_size - gap - mmap_rnd());
33403 }
33404
33405 /*
33406 * Bottom-up (legacy) layout on X86_32 did not support randomization, X86_64
33407 * does, but not when emulating X86_32
33408 */
33409-static unsigned long mmap_legacy_base(void)
33410+static unsigned long mmap_legacy_base(struct mm_struct *mm)
33411 {
33412- if (mmap_is_ia32())
33413+ if (mmap_is_ia32()) {
33414+
33415+#ifdef CONFIG_PAX_SEGMEXEC
33416+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
33417+ return SEGMEXEC_TASK_UNMAPPED_BASE;
33418+ else
33419+#endif
33420+
33421 return TASK_UNMAPPED_BASE;
33422- else
33423+ } else
33424 return TASK_UNMAPPED_BASE + mmap_rnd();
33425 }
33426
33427@@ -112,8 +125,15 @@ static unsigned long mmap_legacy_base(void)
33428 */
33429 void arch_pick_mmap_layout(struct mm_struct *mm)
33430 {
33431- mm->mmap_legacy_base = mmap_legacy_base();
33432- mm->mmap_base = mmap_base();
33433+ mm->mmap_legacy_base = mmap_legacy_base(mm);
33434+ mm->mmap_base = mmap_base(mm);
33435+
33436+#ifdef CONFIG_PAX_RANDMMAP
33437+ if (mm->pax_flags & MF_PAX_RANDMMAP) {
33438+ mm->mmap_legacy_base += mm->delta_mmap;
33439+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
33440+ }
33441+#endif
33442
33443 if (mmap_is_legacy()) {
33444 mm->mmap_base = mm->mmap_legacy_base;
33445diff --git a/arch/x86/mm/mmio-mod.c b/arch/x86/mm/mmio-mod.c
33446index 0057a7a..95c7edd 100644
33447--- a/arch/x86/mm/mmio-mod.c
33448+++ b/arch/x86/mm/mmio-mod.c
33449@@ -194,7 +194,7 @@ static void pre(struct kmmio_probe *p, struct pt_regs *regs,
33450 break;
33451 default:
33452 {
33453- unsigned char *ip = (unsigned char *)instptr;
33454+ unsigned char *ip = (unsigned char *)ktla_ktva(instptr);
33455 my_trace->opcode = MMIO_UNKNOWN_OP;
33456 my_trace->width = 0;
33457 my_trace->value = (*ip) << 16 | *(ip + 1) << 8 |
33458@@ -234,7 +234,7 @@ static void post(struct kmmio_probe *p, unsigned long condition,
33459 static void ioremap_trace_core(resource_size_t offset, unsigned long size,
33460 void __iomem *addr)
33461 {
33462- static atomic_t next_id;
33463+ static atomic_unchecked_t next_id;
33464 struct remap_trace *trace = kmalloc(sizeof(*trace), GFP_KERNEL);
33465 /* These are page-unaligned. */
33466 struct mmiotrace_map map = {
33467@@ -258,7 +258,7 @@ static void ioremap_trace_core(resource_size_t offset, unsigned long size,
33468 .private = trace
33469 },
33470 .phys = offset,
33471- .id = atomic_inc_return(&next_id)
33472+ .id = atomic_inc_return_unchecked(&next_id)
33473 };
33474 map.map_id = trace->id;
33475
33476@@ -290,7 +290,7 @@ void mmiotrace_ioremap(resource_size_t offset, unsigned long size,
33477 ioremap_trace_core(offset, size, addr);
33478 }
33479
33480-static void iounmap_trace_core(volatile void __iomem *addr)
33481+static void iounmap_trace_core(const volatile void __iomem *addr)
33482 {
33483 struct mmiotrace_map map = {
33484 .phys = 0,
33485@@ -328,7 +328,7 @@ not_enabled:
33486 }
33487 }
33488
33489-void mmiotrace_iounmap(volatile void __iomem *addr)
33490+void mmiotrace_iounmap(const volatile void __iomem *addr)
33491 {
33492 might_sleep();
33493 if (is_enabled()) /* recheck and proper locking in *_core() */
33494diff --git a/arch/x86/mm/numa.c b/arch/x86/mm/numa.c
33495index cd4785b..25188b6 100644
33496--- a/arch/x86/mm/numa.c
33497+++ b/arch/x86/mm/numa.c
33498@@ -499,7 +499,7 @@ static void __init numa_clear_kernel_node_hotplug(void)
33499 }
33500 }
33501
33502-static int __init numa_register_memblks(struct numa_meminfo *mi)
33503+static int __init __intentional_overflow(-1) numa_register_memblks(struct numa_meminfo *mi)
33504 {
33505 unsigned long uninitialized_var(pfn_align);
33506 int i, nid;
33507diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
33508index 536ea2f..f42c293 100644
33509--- a/arch/x86/mm/pageattr.c
33510+++ b/arch/x86/mm/pageattr.c
33511@@ -262,7 +262,7 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
33512 */
33513 #ifdef CONFIG_PCI_BIOS
33514 if (pcibios_enabled && within(pfn, BIOS_BEGIN >> PAGE_SHIFT, BIOS_END >> PAGE_SHIFT))
33515- pgprot_val(forbidden) |= _PAGE_NX;
33516+ pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
33517 #endif
33518
33519 /*
33520@@ -270,9 +270,10 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
33521 * Does not cover __inittext since that is gone later on. On
33522 * 64bit we do not enforce !NX on the low mapping
33523 */
33524- if (within(address, (unsigned long)_text, (unsigned long)_etext))
33525- pgprot_val(forbidden) |= _PAGE_NX;
33526+ if (within(address, ktla_ktva((unsigned long)_text), ktla_ktva((unsigned long)_etext)))
33527+ pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
33528
33529+#ifdef CONFIG_DEBUG_RODATA
33530 /*
33531 * The .rodata section needs to be read-only. Using the pfn
33532 * catches all aliases.
33533@@ -280,6 +281,7 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
33534 if (within(pfn, __pa_symbol(__start_rodata) >> PAGE_SHIFT,
33535 __pa_symbol(__end_rodata) >> PAGE_SHIFT))
33536 pgprot_val(forbidden) |= _PAGE_RW;
33537+#endif
33538
33539 #if defined(CONFIG_X86_64) && defined(CONFIG_DEBUG_RODATA)
33540 /*
33541@@ -318,6 +320,13 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
33542 }
33543 #endif
33544
33545+#ifdef CONFIG_PAX_KERNEXEC
33546+ if (within(pfn, __pa(ktla_ktva((unsigned long)&_text)), __pa((unsigned long)&_sdata))) {
33547+ pgprot_val(forbidden) |= _PAGE_RW;
33548+ pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
33549+ }
33550+#endif
33551+
33552 prot = __pgprot(pgprot_val(prot) & ~pgprot_val(forbidden));
33553
33554 return prot;
33555@@ -440,23 +449,37 @@ EXPORT_SYMBOL_GPL(slow_virt_to_phys);
33556 static void __set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte)
33557 {
33558 /* change init_mm */
33559+ pax_open_kernel();
33560 set_pte_atomic(kpte, pte);
33561+
33562 #ifdef CONFIG_X86_32
33563 if (!SHARED_KERNEL_PMD) {
33564+
33565+#ifdef CONFIG_PAX_PER_CPU_PGD
33566+ unsigned long cpu;
33567+#else
33568 struct page *page;
33569+#endif
33570
33571+#ifdef CONFIG_PAX_PER_CPU_PGD
33572+ for (cpu = 0; cpu < nr_cpu_ids; ++cpu) {
33573+ pgd_t *pgd = get_cpu_pgd(cpu, kernel);
33574+#else
33575 list_for_each_entry(page, &pgd_list, lru) {
33576- pgd_t *pgd;
33577+ pgd_t *pgd = (pgd_t *)page_address(page);
33578+#endif
33579+
33580 pud_t *pud;
33581 pmd_t *pmd;
33582
33583- pgd = (pgd_t *)page_address(page) + pgd_index(address);
33584+ pgd += pgd_index(address);
33585 pud = pud_offset(pgd, address);
33586 pmd = pmd_offset(pud, address);
33587 set_pte_atomic((pte_t *)pmd, pte);
33588 }
33589 }
33590 #endif
33591+ pax_close_kernel();
33592 }
33593
33594 static int
33595diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
33596index 7ac6869..c0ba541 100644
33597--- a/arch/x86/mm/pat.c
33598+++ b/arch/x86/mm/pat.c
33599@@ -89,7 +89,7 @@ static inline enum page_cache_mode get_page_memtype(struct page *pg)
33600 unsigned long pg_flags = pg->flags & _PGMT_MASK;
33601
33602 if (pg_flags == _PGMT_DEFAULT)
33603- return -1;
33604+ return _PAGE_CACHE_MODE_NUM;
33605 else if (pg_flags == _PGMT_WC)
33606 return _PAGE_CACHE_MODE_WC;
33607 else if (pg_flags == _PGMT_UC_MINUS)
33608@@ -346,7 +346,7 @@ static int reserve_ram_pages_type(u64 start, u64 end,
33609
33610 page = pfn_to_page(pfn);
33611 type = get_page_memtype(page);
33612- if (type != -1) {
33613+ if (type != _PAGE_CACHE_MODE_NUM) {
33614 pr_info("reserve_ram_pages_type failed [mem %#010Lx-%#010Lx], track 0x%x, req 0x%x\n",
33615 start, end - 1, type, req_type);
33616 if (new_type)
33617@@ -498,7 +498,7 @@ int free_memtype(u64 start, u64 end)
33618
33619 if (!entry) {
33620 printk(KERN_INFO "%s:%d freeing invalid memtype [mem %#010Lx-%#010Lx]\n",
33621- current->comm, current->pid, start, end - 1);
33622+ current->comm, task_pid_nr(current), start, end - 1);
33623 return -EINVAL;
33624 }
33625
33626@@ -532,10 +532,10 @@ static enum page_cache_mode lookup_memtype(u64 paddr)
33627 page = pfn_to_page(paddr >> PAGE_SHIFT);
33628 rettype = get_page_memtype(page);
33629 /*
33630- * -1 from get_page_memtype() implies RAM page is in its
33631+ * _PAGE_CACHE_MODE_NUM from get_page_memtype() implies RAM page is in its
33632 * default state and not reserved, and hence of type WB
33633 */
33634- if (rettype == -1)
33635+ if (rettype == _PAGE_CACHE_MODE_NUM)
33636 rettype = _PAGE_CACHE_MODE_WB;
33637
33638 return rettype;
33639@@ -628,8 +628,8 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
33640
33641 while (cursor < to) {
33642 if (!devmem_is_allowed(pfn)) {
33643- printk(KERN_INFO "Program %s tried to access /dev/mem between [mem %#010Lx-%#010Lx]\n",
33644- current->comm, from, to - 1);
33645+ printk(KERN_INFO "Program %s tried to access /dev/mem between [mem %#010Lx-%#010Lx] (%#010Lx)\n",
33646+ current->comm, from, to - 1, cursor);
33647 return 0;
33648 }
33649 cursor += PAGE_SIZE;
33650@@ -700,7 +700,7 @@ int kernel_map_sync_memtype(u64 base, unsigned long size,
33651 if (ioremap_change_attr((unsigned long)__va(base), id_sz, pcm) < 0) {
33652 printk(KERN_INFO "%s:%d ioremap_change_attr failed %s "
33653 "for [mem %#010Lx-%#010Lx]\n",
33654- current->comm, current->pid,
33655+ current->comm, task_pid_nr(current),
33656 cattr_name(pcm),
33657 base, (unsigned long long)(base + size-1));
33658 return -EINVAL;
33659@@ -735,7 +735,7 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
33660 pcm = lookup_memtype(paddr);
33661 if (want_pcm != pcm) {
33662 printk(KERN_WARNING "%s:%d map pfn RAM range req %s for [mem %#010Lx-%#010Lx], got %s\n",
33663- current->comm, current->pid,
33664+ current->comm, task_pid_nr(current),
33665 cattr_name(want_pcm),
33666 (unsigned long long)paddr,
33667 (unsigned long long)(paddr + size - 1),
33668@@ -757,7 +757,7 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
33669 free_memtype(paddr, paddr + size);
33670 printk(KERN_ERR "%s:%d map pfn expected mapping type %s"
33671 " for [mem %#010Lx-%#010Lx], got %s\n",
33672- current->comm, current->pid,
33673+ current->comm, task_pid_nr(current),
33674 cattr_name(want_pcm),
33675 (unsigned long long)paddr,
33676 (unsigned long long)(paddr + size - 1),
33677diff --git a/arch/x86/mm/pat_rbtree.c b/arch/x86/mm/pat_rbtree.c
33678index 6582adc..fcc5d0b 100644
33679--- a/arch/x86/mm/pat_rbtree.c
33680+++ b/arch/x86/mm/pat_rbtree.c
33681@@ -161,7 +161,7 @@ success:
33682
33683 failure:
33684 printk(KERN_INFO "%s:%d conflicting memory types "
33685- "%Lx-%Lx %s<->%s\n", current->comm, current->pid, start,
33686+ "%Lx-%Lx %s<->%s\n", current->comm, task_pid_nr(current), start,
33687 end, cattr_name(found_type), cattr_name(match->type));
33688 return -EBUSY;
33689 }
33690diff --git a/arch/x86/mm/pf_in.c b/arch/x86/mm/pf_in.c
33691index 9f0614d..92ae64a 100644
33692--- a/arch/x86/mm/pf_in.c
33693+++ b/arch/x86/mm/pf_in.c
33694@@ -148,7 +148,7 @@ enum reason_type get_ins_type(unsigned long ins_addr)
33695 int i;
33696 enum reason_type rv = OTHERS;
33697
33698- p = (unsigned char *)ins_addr;
33699+ p = (unsigned char *)ktla_ktva(ins_addr);
33700 p += skip_prefix(p, &prf);
33701 p += get_opcode(p, &opcode);
33702
33703@@ -168,7 +168,7 @@ static unsigned int get_ins_reg_width(unsigned long ins_addr)
33704 struct prefix_bits prf;
33705 int i;
33706
33707- p = (unsigned char *)ins_addr;
33708+ p = (unsigned char *)ktla_ktva(ins_addr);
33709 p += skip_prefix(p, &prf);
33710 p += get_opcode(p, &opcode);
33711
33712@@ -191,7 +191,7 @@ unsigned int get_ins_mem_width(unsigned long ins_addr)
33713 struct prefix_bits prf;
33714 int i;
33715
33716- p = (unsigned char *)ins_addr;
33717+ p = (unsigned char *)ktla_ktva(ins_addr);
33718 p += skip_prefix(p, &prf);
33719 p += get_opcode(p, &opcode);
33720
33721@@ -415,7 +415,7 @@ unsigned long get_ins_reg_val(unsigned long ins_addr, struct pt_regs *regs)
33722 struct prefix_bits prf;
33723 int i;
33724
33725- p = (unsigned char *)ins_addr;
33726+ p = (unsigned char *)ktla_ktva(ins_addr);
33727 p += skip_prefix(p, &prf);
33728 p += get_opcode(p, &opcode);
33729 for (i = 0; i < ARRAY_SIZE(reg_rop); i++)
33730@@ -470,7 +470,7 @@ unsigned long get_ins_imm_val(unsigned long ins_addr)
33731 struct prefix_bits prf;
33732 int i;
33733
33734- p = (unsigned char *)ins_addr;
33735+ p = (unsigned char *)ktla_ktva(ins_addr);
33736 p += skip_prefix(p, &prf);
33737 p += get_opcode(p, &opcode);
33738 for (i = 0; i < ARRAY_SIZE(imm_wop); i++)
33739diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c
33740index 7b22ada..b11e66f 100644
33741--- a/arch/x86/mm/pgtable.c
33742+++ b/arch/x86/mm/pgtable.c
33743@@ -97,10 +97,75 @@ static inline void pgd_list_del(pgd_t *pgd)
33744 list_del(&page->lru);
33745 }
33746
33747-#define UNSHARED_PTRS_PER_PGD \
33748- (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
33749+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
33750+pgdval_t clone_pgd_mask __read_only = ~_PAGE_PRESENT;
33751
33752+void __shadow_user_pgds(pgd_t *dst, const pgd_t *src)
33753+{
33754+ unsigned int count = USER_PGD_PTRS;
33755
33756+ if (!pax_user_shadow_base)
33757+ return;
33758+
33759+ while (count--)
33760+ *dst++ = __pgd((pgd_val(*src++) | (_PAGE_NX & __supported_pte_mask)) & ~_PAGE_USER);
33761+}
33762+#endif
33763+
33764+#ifdef CONFIG_PAX_PER_CPU_PGD
33765+void __clone_user_pgds(pgd_t *dst, const pgd_t *src)
33766+{
33767+ unsigned int count = USER_PGD_PTRS;
33768+
33769+ while (count--) {
33770+ pgd_t pgd;
33771+
33772+#ifdef CONFIG_X86_64
33773+ pgd = __pgd(pgd_val(*src++) | _PAGE_USER);
33774+#else
33775+ pgd = *src++;
33776+#endif
33777+
33778+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
33779+ pgd = __pgd(pgd_val(pgd) & clone_pgd_mask);
33780+#endif
33781+
33782+ *dst++ = pgd;
33783+ }
33784+
33785+}
33786+#endif
33787+
33788+#ifdef CONFIG_X86_64
33789+#define pxd_t pud_t
33790+#define pyd_t pgd_t
33791+#define paravirt_release_pxd(pfn) paravirt_release_pud(pfn)
33792+#define pgtable_pxd_page_ctor(page) true
33793+#define pgtable_pxd_page_dtor(page) do {} while (0)
33794+#define pxd_free(mm, pud) pud_free((mm), (pud))
33795+#define pyd_populate(mm, pgd, pud) pgd_populate((mm), (pgd), (pud))
33796+#define pyd_offset(mm, address) pgd_offset((mm), (address))
33797+#define PYD_SIZE PGDIR_SIZE
33798+#define mm_inc_nr_pxds(mm) do {} while (0)
33799+#define mm_dec_nr_pxds(mm) do {} while (0)
33800+#else
33801+#define pxd_t pmd_t
33802+#define pyd_t pud_t
33803+#define paravirt_release_pxd(pfn) paravirt_release_pmd(pfn)
33804+#define pgtable_pxd_page_ctor(page) pgtable_pmd_page_ctor(page)
33805+#define pgtable_pxd_page_dtor(page) pgtable_pmd_page_dtor(page)
33806+#define pxd_free(mm, pud) pmd_free((mm), (pud))
33807+#define pyd_populate(mm, pgd, pud) pud_populate((mm), (pgd), (pud))
33808+#define pyd_offset(mm, address) pud_offset((mm), (address))
33809+#define PYD_SIZE PUD_SIZE
33810+#define mm_inc_nr_pxds(mm) mm_inc_nr_pmds(mm)
33811+#define mm_dec_nr_pxds(mm) mm_dec_nr_pmds(mm)
33812+#endif
33813+
33814+#ifdef CONFIG_PAX_PER_CPU_PGD
33815+static inline void pgd_ctor(struct mm_struct *mm, pgd_t *pgd) {}
33816+static inline void pgd_dtor(pgd_t *pgd) {}
33817+#else
33818 static void pgd_set_mm(pgd_t *pgd, struct mm_struct *mm)
33819 {
33820 BUILD_BUG_ON(sizeof(virt_to_page(pgd)->index) < sizeof(mm));
33821@@ -141,6 +206,7 @@ static void pgd_dtor(pgd_t *pgd)
33822 pgd_list_del(pgd);
33823 spin_unlock(&pgd_lock);
33824 }
33825+#endif
33826
33827 /*
33828 * List of all pgd's needed for non-PAE so it can invalidate entries
33829@@ -153,7 +219,7 @@ static void pgd_dtor(pgd_t *pgd)
33830 * -- nyc
33831 */
33832
33833-#ifdef CONFIG_X86_PAE
33834+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
33835 /*
33836 * In PAE mode, we need to do a cr3 reload (=tlb flush) when
33837 * updating the top-level pagetable entries to guarantee the
33838@@ -165,7 +231,7 @@ static void pgd_dtor(pgd_t *pgd)
33839 * not shared between pagetables (!SHARED_KERNEL_PMDS), we allocate
33840 * and initialize the kernel pmds here.
33841 */
33842-#define PREALLOCATED_PMDS UNSHARED_PTRS_PER_PGD
33843+#define PREALLOCATED_PXDS (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
33844
33845 void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
33846 {
33847@@ -183,46 +249,48 @@ void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
33848 */
33849 flush_tlb_mm(mm);
33850 }
33851+#elif defined(CONFIG_X86_64) && defined(CONFIG_PAX_PER_CPU_PGD)
33852+#define PREALLOCATED_PXDS USER_PGD_PTRS
33853 #else /* !CONFIG_X86_PAE */
33854
33855 /* No need to prepopulate any pagetable entries in non-PAE modes. */
33856-#define PREALLOCATED_PMDS 0
33857+#define PREALLOCATED_PXDS 0
33858
33859 #endif /* CONFIG_X86_PAE */
33860
33861-static void free_pmds(struct mm_struct *mm, pmd_t *pmds[])
33862+static void free_pxds(struct mm_struct *mm, pxd_t *pxds[])
33863 {
33864 int i;
33865
33866- for(i = 0; i < PREALLOCATED_PMDS; i++)
33867- if (pmds[i]) {
33868- pgtable_pmd_page_dtor(virt_to_page(pmds[i]));
33869- free_page((unsigned long)pmds[i]);
33870- mm_dec_nr_pmds(mm);
33871+ for(i = 0; i < PREALLOCATED_PXDS; i++)
33872+ if (pxds[i]) {
33873+ pgtable_pxd_page_dtor(virt_to_page(pxds[i]));
33874+ free_page((unsigned long)pxds[i]);
33875+ mm_dec_nr_pxds(mm);
33876 }
33877 }
33878
33879-static int preallocate_pmds(struct mm_struct *mm, pmd_t *pmds[])
33880+static int preallocate_pxds(struct mm_struct *mm, pxd_t *pxds[])
33881 {
33882 int i;
33883 bool failed = false;
33884
33885- for(i = 0; i < PREALLOCATED_PMDS; i++) {
33886- pmd_t *pmd = (pmd_t *)__get_free_page(PGALLOC_GFP);
33887- if (!pmd)
33888+ for(i = 0; i < PREALLOCATED_PXDS; i++) {
33889+ pxd_t *pxd = (pxd_t *)__get_free_page(PGALLOC_GFP);
33890+ if (!pxd)
33891 failed = true;
33892- if (pmd && !pgtable_pmd_page_ctor(virt_to_page(pmd))) {
33893- free_page((unsigned long)pmd);
33894- pmd = NULL;
33895+ if (pxd && !pgtable_pxd_page_ctor(virt_to_page(pxd))) {
33896+ free_page((unsigned long)pxd);
33897+ pxd = NULL;
33898 failed = true;
33899 }
33900- if (pmd)
33901- mm_inc_nr_pmds(mm);
33902- pmds[i] = pmd;
33903+ if (pxd)
33904+ mm_inc_nr_pxds(mm);
33905+ pxds[i] = pxd;
33906 }
33907
33908 if (failed) {
33909- free_pmds(mm, pmds);
33910+ free_pxds(mm, pxds);
33911 return -ENOMEM;
33912 }
33913
33914@@ -235,50 +303,54 @@ static int preallocate_pmds(struct mm_struct *mm, pmd_t *pmds[])
33915 * preallocate which never got a corresponding vma will need to be
33916 * freed manually.
33917 */
33918-static void pgd_mop_up_pmds(struct mm_struct *mm, pgd_t *pgdp)
33919+static void pgd_mop_up_pxds(struct mm_struct *mm, pgd_t *pgdp)
33920 {
33921 int i;
33922
33923- for(i = 0; i < PREALLOCATED_PMDS; i++) {
33924+ for(i = 0; i < PREALLOCATED_PXDS; i++) {
33925 pgd_t pgd = pgdp[i];
33926
33927 if (pgd_val(pgd) != 0) {
33928- pmd_t *pmd = (pmd_t *)pgd_page_vaddr(pgd);
33929+ pxd_t *pxd = (pxd_t *)pgd_page_vaddr(pgd);
33930
33931- pgdp[i] = native_make_pgd(0);
33932+ set_pgd(pgdp + i, native_make_pgd(0));
33933
33934- paravirt_release_pmd(pgd_val(pgd) >> PAGE_SHIFT);
33935- pmd_free(mm, pmd);
33936- mm_dec_nr_pmds(mm);
33937+ paravirt_release_pxd(pgd_val(pgd) >> PAGE_SHIFT);
33938+ pxd_free(mm, pxd);
33939+ mm_dec_nr_pxds(mm);
33940 }
33941 }
33942 }
33943
33944-static void pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmds[])
33945+static void pgd_prepopulate_pxd(struct mm_struct *mm, pgd_t *pgd, pxd_t *pxds[])
33946 {
33947- pud_t *pud;
33948+ pyd_t *pyd;
33949 int i;
33950
33951- if (PREALLOCATED_PMDS == 0) /* Work around gcc-3.4.x bug */
33952+ if (PREALLOCATED_PXDS == 0) /* Work around gcc-3.4.x bug */
33953 return;
33954
33955- pud = pud_offset(pgd, 0);
33956+#ifdef CONFIG_X86_64
33957+ pyd = pyd_offset(mm, 0L);
33958+#else
33959+ pyd = pyd_offset(pgd, 0L);
33960+#endif
33961
33962- for (i = 0; i < PREALLOCATED_PMDS; i++, pud++) {
33963- pmd_t *pmd = pmds[i];
33964+ for (i = 0; i < PREALLOCATED_PXDS; i++, pyd++) {
33965+ pxd_t *pxd = pxds[i];
33966
33967 if (i >= KERNEL_PGD_BOUNDARY)
33968- memcpy(pmd, (pmd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
33969- sizeof(pmd_t) * PTRS_PER_PMD);
33970+ memcpy(pxd, (pxd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
33971+ sizeof(pxd_t) * PTRS_PER_PMD);
33972
33973- pud_populate(mm, pud, pmd);
33974+ pyd_populate(mm, pyd, pxd);
33975 }
33976 }
33977
33978 pgd_t *pgd_alloc(struct mm_struct *mm)
33979 {
33980 pgd_t *pgd;
33981- pmd_t *pmds[PREALLOCATED_PMDS];
33982+ pxd_t *pxds[PREALLOCATED_PXDS];
33983
33984 pgd = (pgd_t *)__get_free_page(PGALLOC_GFP);
33985
33986@@ -287,11 +359,11 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
33987
33988 mm->pgd = pgd;
33989
33990- if (preallocate_pmds(mm, pmds) != 0)
33991+ if (preallocate_pxds(mm, pxds) != 0)
33992 goto out_free_pgd;
33993
33994 if (paravirt_pgd_alloc(mm) != 0)
33995- goto out_free_pmds;
33996+ goto out_free_pxds;
33997
33998 /*
33999 * Make sure that pre-populating the pmds is atomic with
34000@@ -301,14 +373,14 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
34001 spin_lock(&pgd_lock);
34002
34003 pgd_ctor(mm, pgd);
34004- pgd_prepopulate_pmd(mm, pgd, pmds);
34005+ pgd_prepopulate_pxd(mm, pgd, pxds);
34006
34007 spin_unlock(&pgd_lock);
34008
34009 return pgd;
34010
34011-out_free_pmds:
34012- free_pmds(mm, pmds);
34013+out_free_pxds:
34014+ free_pxds(mm, pxds);
34015 out_free_pgd:
34016 free_page((unsigned long)pgd);
34017 out:
34018@@ -317,7 +389,7 @@ out:
34019
34020 void pgd_free(struct mm_struct *mm, pgd_t *pgd)
34021 {
34022- pgd_mop_up_pmds(mm, pgd);
34023+ pgd_mop_up_pxds(mm, pgd);
34024 pgd_dtor(pgd);
34025 paravirt_pgd_free(mm, pgd);
34026 free_page((unsigned long)pgd);
34027diff --git a/arch/x86/mm/pgtable_32.c b/arch/x86/mm/pgtable_32.c
34028index 75cc097..79a097f 100644
34029--- a/arch/x86/mm/pgtable_32.c
34030+++ b/arch/x86/mm/pgtable_32.c
34031@@ -47,10 +47,13 @@ void set_pte_vaddr(unsigned long vaddr, pte_t pteval)
34032 return;
34033 }
34034 pte = pte_offset_kernel(pmd, vaddr);
34035+
34036+ pax_open_kernel();
34037 if (pte_val(pteval))
34038 set_pte_at(&init_mm, vaddr, pte, pteval);
34039 else
34040 pte_clear(&init_mm, vaddr, pte);
34041+ pax_close_kernel();
34042
34043 /*
34044 * It's enough to flush this one mapping.
34045diff --git a/arch/x86/mm/physaddr.c b/arch/x86/mm/physaddr.c
34046index e666cbb..61788c45 100644
34047--- a/arch/x86/mm/physaddr.c
34048+++ b/arch/x86/mm/physaddr.c
34049@@ -10,7 +10,7 @@
34050 #ifdef CONFIG_X86_64
34051
34052 #ifdef CONFIG_DEBUG_VIRTUAL
34053-unsigned long __phys_addr(unsigned long x)
34054+unsigned long __intentional_overflow(-1) __phys_addr(unsigned long x)
34055 {
34056 unsigned long y = x - __START_KERNEL_map;
34057
34058@@ -67,7 +67,7 @@ EXPORT_SYMBOL(__virt_addr_valid);
34059 #else
34060
34061 #ifdef CONFIG_DEBUG_VIRTUAL
34062-unsigned long __phys_addr(unsigned long x)
34063+unsigned long __intentional_overflow(-1) __phys_addr(unsigned long x)
34064 {
34065 unsigned long phys_addr = x - PAGE_OFFSET;
34066 /* VMALLOC_* aren't constants */
34067diff --git a/arch/x86/mm/setup_nx.c b/arch/x86/mm/setup_nx.c
34068index 90555bf..f5f1828 100644
34069--- a/arch/x86/mm/setup_nx.c
34070+++ b/arch/x86/mm/setup_nx.c
34071@@ -5,8 +5,10 @@
34072 #include <asm/pgtable.h>
34073 #include <asm/proto.h>
34074
34075+#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
34076 static int disable_nx;
34077
34078+#ifndef CONFIG_PAX_PAGEEXEC
34079 /*
34080 * noexec = on|off
34081 *
34082@@ -28,12 +30,17 @@ static int __init noexec_setup(char *str)
34083 return 0;
34084 }
34085 early_param("noexec", noexec_setup);
34086+#endif
34087+
34088+#endif
34089
34090 void x86_configure_nx(void)
34091 {
34092+#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
34093 if (cpu_has_nx && !disable_nx)
34094 __supported_pte_mask |= _PAGE_NX;
34095 else
34096+#endif
34097 __supported_pte_mask &= ~_PAGE_NX;
34098 }
34099
34100diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
34101index 3250f23..7a97ba2 100644
34102--- a/arch/x86/mm/tlb.c
34103+++ b/arch/x86/mm/tlb.c
34104@@ -45,7 +45,11 @@ void leave_mm(int cpu)
34105 BUG();
34106 if (cpumask_test_cpu(cpu, mm_cpumask(active_mm))) {
34107 cpumask_clear_cpu(cpu, mm_cpumask(active_mm));
34108+
34109+#ifndef CONFIG_PAX_PER_CPU_PGD
34110 load_cr3(swapper_pg_dir);
34111+#endif
34112+
34113 /*
34114 * This gets called in the idle path where RCU
34115 * functions differently. Tracing normally
34116diff --git a/arch/x86/mm/uderef_64.c b/arch/x86/mm/uderef_64.c
34117new file mode 100644
34118index 0000000..dace51c
34119--- /dev/null
34120+++ b/arch/x86/mm/uderef_64.c
34121@@ -0,0 +1,37 @@
34122+#include <linux/mm.h>
34123+#include <asm/pgtable.h>
34124+#include <asm/uaccess.h>
34125+
34126+#ifdef CONFIG_PAX_MEMORY_UDEREF
34127+/* PaX: due to the special call convention these functions must
34128+ * - remain leaf functions under all configurations,
34129+ * - never be called directly, only dereferenced from the wrappers.
34130+ */
34131+void __pax_open_userland(void)
34132+{
34133+ unsigned int cpu;
34134+
34135+ if (unlikely(!segment_eq(get_fs(), USER_DS)))
34136+ return;
34137+
34138+ cpu = raw_get_cpu();
34139+ BUG_ON((read_cr3() & ~PAGE_MASK) != PCID_KERNEL);
34140+ write_cr3(__pa(get_cpu_pgd(cpu, user)) | PCID_USER | PCID_NOFLUSH);
34141+ raw_put_cpu_no_resched();
34142+}
34143+EXPORT_SYMBOL(__pax_open_userland);
34144+
34145+void __pax_close_userland(void)
34146+{
34147+ unsigned int cpu;
34148+
34149+ if (unlikely(!segment_eq(get_fs(), USER_DS)))
34150+ return;
34151+
34152+ cpu = raw_get_cpu();
34153+ BUG_ON((read_cr3() & ~PAGE_MASK) != PCID_USER);
34154+ write_cr3(__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL | PCID_NOFLUSH);
34155+ raw_put_cpu_no_resched();
34156+}
34157+EXPORT_SYMBOL(__pax_close_userland);
34158+#endif
34159diff --git a/arch/x86/net/bpf_jit.S b/arch/x86/net/bpf_jit.S
34160index 6440221..f84b5c7 100644
34161--- a/arch/x86/net/bpf_jit.S
34162+++ b/arch/x86/net/bpf_jit.S
34163@@ -9,6 +9,7 @@
34164 */
34165 #include <linux/linkage.h>
34166 #include <asm/dwarf2.h>
34167+#include <asm/alternative-asm.h>
34168
34169 /*
34170 * Calling convention :
34171@@ -38,6 +39,7 @@ sk_load_word_positive_offset:
34172 jle bpf_slow_path_word
34173 mov (SKBDATA,%rsi),%eax
34174 bswap %eax /* ntohl() */
34175+ pax_force_retaddr
34176 ret
34177
34178 sk_load_half:
34179@@ -55,6 +57,7 @@ sk_load_half_positive_offset:
34180 jle bpf_slow_path_half
34181 movzwl (SKBDATA,%rsi),%eax
34182 rol $8,%ax # ntohs()
34183+ pax_force_retaddr
34184 ret
34185
34186 sk_load_byte:
34187@@ -69,6 +72,7 @@ sk_load_byte_positive_offset:
34188 cmp %esi,%r9d /* if (offset >= hlen) goto bpf_slow_path_byte */
34189 jle bpf_slow_path_byte
34190 movzbl (SKBDATA,%rsi),%eax
34191+ pax_force_retaddr
34192 ret
34193
34194 /* rsi contains offset and can be scratched */
34195@@ -90,6 +94,7 @@ bpf_slow_path_word:
34196 js bpf_error
34197 mov - MAX_BPF_STACK + 32(%rbp),%eax
34198 bswap %eax
34199+ pax_force_retaddr
34200 ret
34201
34202 bpf_slow_path_half:
34203@@ -98,12 +103,14 @@ bpf_slow_path_half:
34204 mov - MAX_BPF_STACK + 32(%rbp),%ax
34205 rol $8,%ax
34206 movzwl %ax,%eax
34207+ pax_force_retaddr
34208 ret
34209
34210 bpf_slow_path_byte:
34211 bpf_slow_path_common(1)
34212 js bpf_error
34213 movzbl - MAX_BPF_STACK + 32(%rbp),%eax
34214+ pax_force_retaddr
34215 ret
34216
34217 #define sk_negative_common(SIZE) \
34218@@ -126,6 +133,7 @@ sk_load_word_negative_offset:
34219 sk_negative_common(4)
34220 mov (%rax), %eax
34221 bswap %eax
34222+ pax_force_retaddr
34223 ret
34224
34225 bpf_slow_path_half_neg:
34226@@ -137,6 +145,7 @@ sk_load_half_negative_offset:
34227 mov (%rax),%ax
34228 rol $8,%ax
34229 movzwl %ax,%eax
34230+ pax_force_retaddr
34231 ret
34232
34233 bpf_slow_path_byte_neg:
34234@@ -146,6 +155,7 @@ sk_load_byte_negative_offset:
34235 .globl sk_load_byte_negative_offset
34236 sk_negative_common(1)
34237 movzbl (%rax), %eax
34238+ pax_force_retaddr
34239 ret
34240
34241 bpf_error:
34242@@ -156,4 +166,5 @@ bpf_error:
34243 mov - MAX_BPF_STACK + 16(%rbp),%r14
34244 mov - MAX_BPF_STACK + 24(%rbp),%r15
34245 leaveq
34246+ pax_force_retaddr
34247 ret
34248diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
34249index 9875143..36776ae 100644
34250--- a/arch/x86/net/bpf_jit_comp.c
34251+++ b/arch/x86/net/bpf_jit_comp.c
34252@@ -13,7 +13,11 @@
34253 #include <linux/if_vlan.h>
34254 #include <asm/cacheflush.h>
34255
34256+#ifdef CONFIG_GRKERNSEC_BPF_HARDEN
34257+int bpf_jit_enable __read_only;
34258+#else
34259 int bpf_jit_enable __read_mostly;
34260+#endif
34261
34262 /*
34263 * assembly code in arch/x86/net/bpf_jit.S
34264@@ -174,7 +178,9 @@ static u8 add_2reg(u8 byte, u32 dst_reg, u32 src_reg)
34265 static void jit_fill_hole(void *area, unsigned int size)
34266 {
34267 /* fill whole space with int3 instructions */
34268+ pax_open_kernel();
34269 memset(area, 0xcc, size);
34270+ pax_close_kernel();
34271 }
34272
34273 struct jit_context {
34274@@ -559,6 +565,13 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image,
34275 if (is_ereg(dst_reg))
34276 EMIT1(0x41);
34277 EMIT3(0xC1, add_1reg(0xC8, dst_reg), 8);
34278+
34279+ /* emit 'movzwl eax, ax' */
34280+ if (is_ereg(dst_reg))
34281+ EMIT3(0x45, 0x0F, 0xB7);
34282+ else
34283+ EMIT2(0x0F, 0xB7);
34284+ EMIT1(add_2reg(0xC0, dst_reg, dst_reg));
34285 break;
34286 case 32:
34287 /* emit 'bswap eax' to swap lower 4 bytes */
34288@@ -577,6 +590,27 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image,
34289 break;
34290
34291 case BPF_ALU | BPF_END | BPF_FROM_LE:
34292+ switch (imm32) {
34293+ case 16:
34294+ /* emit 'movzwl eax, ax' to zero extend 16-bit
34295+ * into 64 bit
34296+ */
34297+ if (is_ereg(dst_reg))
34298+ EMIT3(0x45, 0x0F, 0xB7);
34299+ else
34300+ EMIT2(0x0F, 0xB7);
34301+ EMIT1(add_2reg(0xC0, dst_reg, dst_reg));
34302+ break;
34303+ case 32:
34304+ /* emit 'mov eax, eax' to clear upper 32-bits */
34305+ if (is_ereg(dst_reg))
34306+ EMIT1(0x45);
34307+ EMIT2(0x89, add_2reg(0xC0, dst_reg, dst_reg));
34308+ break;
34309+ case 64:
34310+ /* nop */
34311+ break;
34312+ }
34313 break;
34314
34315 /* ST: *(u8*)(dst_reg + off) = imm */
34316@@ -896,7 +930,9 @@ common_load:
34317 pr_err("bpf_jit_compile fatal error\n");
34318 return -EFAULT;
34319 }
34320+ pax_open_kernel();
34321 memcpy(image + proglen, temp, ilen);
34322+ pax_close_kernel();
34323 }
34324 proglen += ilen;
34325 addrs[i] = proglen;
34326@@ -968,7 +1004,6 @@ void bpf_int_jit_compile(struct bpf_prog *prog)
34327
34328 if (image) {
34329 bpf_flush_icache(header, image + proglen);
34330- set_memory_ro((unsigned long)header, header->pages);
34331 prog->bpf_func = (void *)image;
34332 prog->jited = true;
34333 }
34334@@ -981,12 +1016,8 @@ void bpf_jit_free(struct bpf_prog *fp)
34335 unsigned long addr = (unsigned long)fp->bpf_func & PAGE_MASK;
34336 struct bpf_binary_header *header = (void *)addr;
34337
34338- if (!fp->jited)
34339- goto free_filter;
34340+ if (fp->jited)
34341+ bpf_jit_binary_free(header);
34342
34343- set_memory_rw(addr, header->pages);
34344- bpf_jit_binary_free(header);
34345-
34346-free_filter:
34347 bpf_prog_unlock_free(fp);
34348 }
34349diff --git a/arch/x86/oprofile/backtrace.c b/arch/x86/oprofile/backtrace.c
34350index 5d04be5..2beeaa2 100644
34351--- a/arch/x86/oprofile/backtrace.c
34352+++ b/arch/x86/oprofile/backtrace.c
34353@@ -46,11 +46,11 @@ dump_user_backtrace_32(struct stack_frame_ia32 *head)
34354 struct stack_frame_ia32 *fp;
34355 unsigned long bytes;
34356
34357- bytes = copy_from_user_nmi(bufhead, head, sizeof(bufhead));
34358+ bytes = copy_from_user_nmi(bufhead, (const char __force_user *)head, sizeof(bufhead));
34359 if (bytes != 0)
34360 return NULL;
34361
34362- fp = (struct stack_frame_ia32 *) compat_ptr(bufhead[0].next_frame);
34363+ fp = (struct stack_frame_ia32 __force_kernel *) compat_ptr(bufhead[0].next_frame);
34364
34365 oprofile_add_trace(bufhead[0].return_address);
34366
34367@@ -92,7 +92,7 @@ static struct stack_frame *dump_user_backtrace(struct stack_frame *head)
34368 struct stack_frame bufhead[2];
34369 unsigned long bytes;
34370
34371- bytes = copy_from_user_nmi(bufhead, head, sizeof(bufhead));
34372+ bytes = copy_from_user_nmi(bufhead, (const char __force_user *)head, sizeof(bufhead));
34373 if (bytes != 0)
34374 return NULL;
34375
34376@@ -111,7 +111,7 @@ x86_backtrace(struct pt_regs * const regs, unsigned int depth)
34377 {
34378 struct stack_frame *head = (struct stack_frame *)frame_pointer(regs);
34379
34380- if (!user_mode_vm(regs)) {
34381+ if (!user_mode(regs)) {
34382 unsigned long stack = kernel_stack_pointer(regs);
34383 if (depth)
34384 dump_trace(NULL, regs, (unsigned long *)stack, 0,
34385diff --git a/arch/x86/oprofile/nmi_int.c b/arch/x86/oprofile/nmi_int.c
34386index 1d2e639..f6ef82a 100644
34387--- a/arch/x86/oprofile/nmi_int.c
34388+++ b/arch/x86/oprofile/nmi_int.c
34389@@ -23,6 +23,7 @@
34390 #include <asm/nmi.h>
34391 #include <asm/msr.h>
34392 #include <asm/apic.h>
34393+#include <asm/pgtable.h>
34394
34395 #include "op_counter.h"
34396 #include "op_x86_model.h"
34397@@ -785,8 +786,11 @@ int __init op_nmi_init(struct oprofile_operations *ops)
34398 if (ret)
34399 return ret;
34400
34401- if (!model->num_virt_counters)
34402- model->num_virt_counters = model->num_counters;
34403+ if (!model->num_virt_counters) {
34404+ pax_open_kernel();
34405+ *(unsigned int *)&model->num_virt_counters = model->num_counters;
34406+ pax_close_kernel();
34407+ }
34408
34409 mux_init(ops);
34410
34411diff --git a/arch/x86/oprofile/op_model_amd.c b/arch/x86/oprofile/op_model_amd.c
34412index 50d86c0..7985318 100644
34413--- a/arch/x86/oprofile/op_model_amd.c
34414+++ b/arch/x86/oprofile/op_model_amd.c
34415@@ -519,9 +519,11 @@ static int op_amd_init(struct oprofile_operations *ops)
34416 num_counters = AMD64_NUM_COUNTERS;
34417 }
34418
34419- op_amd_spec.num_counters = num_counters;
34420- op_amd_spec.num_controls = num_counters;
34421- op_amd_spec.num_virt_counters = max(num_counters, NUM_VIRT_COUNTERS);
34422+ pax_open_kernel();
34423+ *(unsigned int *)&op_amd_spec.num_counters = num_counters;
34424+ *(unsigned int *)&op_amd_spec.num_controls = num_counters;
34425+ *(unsigned int *)&op_amd_spec.num_virt_counters = max(num_counters, NUM_VIRT_COUNTERS);
34426+ pax_close_kernel();
34427
34428 return 0;
34429 }
34430diff --git a/arch/x86/oprofile/op_model_ppro.c b/arch/x86/oprofile/op_model_ppro.c
34431index d90528e..0127e2b 100644
34432--- a/arch/x86/oprofile/op_model_ppro.c
34433+++ b/arch/x86/oprofile/op_model_ppro.c
34434@@ -19,6 +19,7 @@
34435 #include <asm/msr.h>
34436 #include <asm/apic.h>
34437 #include <asm/nmi.h>
34438+#include <asm/pgtable.h>
34439
34440 #include "op_x86_model.h"
34441 #include "op_counter.h"
34442@@ -221,8 +222,10 @@ static void arch_perfmon_setup_counters(void)
34443
34444 num_counters = min((int)eax.split.num_counters, OP_MAX_COUNTER);
34445
34446- op_arch_perfmon_spec.num_counters = num_counters;
34447- op_arch_perfmon_spec.num_controls = num_counters;
34448+ pax_open_kernel();
34449+ *(unsigned int *)&op_arch_perfmon_spec.num_counters = num_counters;
34450+ *(unsigned int *)&op_arch_perfmon_spec.num_controls = num_counters;
34451+ pax_close_kernel();
34452 }
34453
34454 static int arch_perfmon_init(struct oprofile_operations *ignore)
34455diff --git a/arch/x86/oprofile/op_x86_model.h b/arch/x86/oprofile/op_x86_model.h
34456index 71e8a67..6a313bb 100644
34457--- a/arch/x86/oprofile/op_x86_model.h
34458+++ b/arch/x86/oprofile/op_x86_model.h
34459@@ -52,7 +52,7 @@ struct op_x86_model_spec {
34460 void (*switch_ctrl)(struct op_x86_model_spec const *model,
34461 struct op_msrs const * const msrs);
34462 #endif
34463-};
34464+} __do_const;
34465
34466 struct op_counter_config;
34467
34468diff --git a/arch/x86/pci/intel_mid_pci.c b/arch/x86/pci/intel_mid_pci.c
34469index 852aa4c..71613f2 100644
34470--- a/arch/x86/pci/intel_mid_pci.c
34471+++ b/arch/x86/pci/intel_mid_pci.c
34472@@ -258,7 +258,7 @@ int __init intel_mid_pci_init(void)
34473 pci_mmcfg_late_init();
34474 pcibios_enable_irq = intel_mid_pci_irq_enable;
34475 pcibios_disable_irq = intel_mid_pci_irq_disable;
34476- pci_root_ops = intel_mid_pci_ops;
34477+ memcpy((void *)&pci_root_ops, &intel_mid_pci_ops, sizeof pci_root_ops);
34478 pci_soc_mode = 1;
34479 /* Continue with standard init */
34480 return 1;
34481diff --git a/arch/x86/pci/irq.c b/arch/x86/pci/irq.c
34482index 5dc6ca5..25c03f5 100644
34483--- a/arch/x86/pci/irq.c
34484+++ b/arch/x86/pci/irq.c
34485@@ -51,7 +51,7 @@ struct irq_router {
34486 struct irq_router_handler {
34487 u16 vendor;
34488 int (*probe)(struct irq_router *r, struct pci_dev *router, u16 device);
34489-};
34490+} __do_const;
34491
34492 int (*pcibios_enable_irq)(struct pci_dev *dev) = pirq_enable_irq;
34493 void (*pcibios_disable_irq)(struct pci_dev *dev) = pirq_disable_irq;
34494@@ -791,7 +791,7 @@ static __init int pico_router_probe(struct irq_router *r, struct pci_dev *router
34495 return 0;
34496 }
34497
34498-static __initdata struct irq_router_handler pirq_routers[] = {
34499+static __initconst const struct irq_router_handler pirq_routers[] = {
34500 { PCI_VENDOR_ID_INTEL, intel_router_probe },
34501 { PCI_VENDOR_ID_AL, ali_router_probe },
34502 { PCI_VENDOR_ID_ITE, ite_router_probe },
34503@@ -818,7 +818,7 @@ static struct pci_dev *pirq_router_dev;
34504 static void __init pirq_find_router(struct irq_router *r)
34505 {
34506 struct irq_routing_table *rt = pirq_table;
34507- struct irq_router_handler *h;
34508+ const struct irq_router_handler *h;
34509
34510 #ifdef CONFIG_PCI_BIOS
34511 if (!rt->signature) {
34512@@ -1091,7 +1091,7 @@ static int __init fix_acer_tm360_irqrouting(const struct dmi_system_id *d)
34513 return 0;
34514 }
34515
34516-static struct dmi_system_id __initdata pciirq_dmi_table[] = {
34517+static const struct dmi_system_id __initconst pciirq_dmi_table[] = {
34518 {
34519 .callback = fix_broken_hp_bios_irq9,
34520 .ident = "HP Pavilion N5400 Series Laptop",
34521diff --git a/arch/x86/pci/pcbios.c b/arch/x86/pci/pcbios.c
34522index 9b83b90..4112152 100644
34523--- a/arch/x86/pci/pcbios.c
34524+++ b/arch/x86/pci/pcbios.c
34525@@ -79,7 +79,7 @@ union bios32 {
34526 static struct {
34527 unsigned long address;
34528 unsigned short segment;
34529-} bios32_indirect __initdata = { 0, __KERNEL_CS };
34530+} bios32_indirect __initconst = { 0, __PCIBIOS_CS };
34531
34532 /*
34533 * Returns the entry point for the given service, NULL on error
34534@@ -92,37 +92,80 @@ static unsigned long __init bios32_service(unsigned long service)
34535 unsigned long length; /* %ecx */
34536 unsigned long entry; /* %edx */
34537 unsigned long flags;
34538+ struct desc_struct d, *gdt;
34539
34540 local_irq_save(flags);
34541- __asm__("lcall *(%%edi); cld"
34542+
34543+ gdt = get_cpu_gdt_table(smp_processor_id());
34544+
34545+ pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x9B, 0xC);
34546+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
34547+ pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x93, 0xC);
34548+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
34549+
34550+ __asm__("movw %w7, %%ds; lcall *(%%edi); push %%ss; pop %%ds; cld"
34551 : "=a" (return_code),
34552 "=b" (address),
34553 "=c" (length),
34554 "=d" (entry)
34555 : "0" (service),
34556 "1" (0),
34557- "D" (&bios32_indirect));
34558+ "D" (&bios32_indirect),
34559+ "r"(__PCIBIOS_DS)
34560+ : "memory");
34561+
34562+ pax_open_kernel();
34563+ gdt[GDT_ENTRY_PCIBIOS_CS].a = 0;
34564+ gdt[GDT_ENTRY_PCIBIOS_CS].b = 0;
34565+ gdt[GDT_ENTRY_PCIBIOS_DS].a = 0;
34566+ gdt[GDT_ENTRY_PCIBIOS_DS].b = 0;
34567+ pax_close_kernel();
34568+
34569 local_irq_restore(flags);
34570
34571 switch (return_code) {
34572- case 0:
34573- return address + entry;
34574- case 0x80: /* Not present */
34575- printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
34576- return 0;
34577- default: /* Shouldn't happen */
34578- printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
34579- service, return_code);
34580+ case 0: {
34581+ int cpu;
34582+ unsigned char flags;
34583+
34584+ printk(KERN_INFO "bios32_service: base:%08lx length:%08lx entry:%08lx\n", address, length, entry);
34585+ if (address >= 0xFFFF0 || length > 0x100000 - address || length <= entry) {
34586+ printk(KERN_WARNING "bios32_service: not valid\n");
34587 return 0;
34588+ }
34589+ address = address + PAGE_OFFSET;
34590+ length += 16UL; /* some BIOSs underreport this... */
34591+ flags = 4;
34592+ if (length >= 64*1024*1024) {
34593+ length >>= PAGE_SHIFT;
34594+ flags |= 8;
34595+ }
34596+
34597+ for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
34598+ gdt = get_cpu_gdt_table(cpu);
34599+ pack_descriptor(&d, address, length, 0x9b, flags);
34600+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
34601+ pack_descriptor(&d, address, length, 0x93, flags);
34602+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
34603+ }
34604+ return entry;
34605+ }
34606+ case 0x80: /* Not present */
34607+ printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
34608+ return 0;
34609+ default: /* Shouldn't happen */
34610+ printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
34611+ service, return_code);
34612+ return 0;
34613 }
34614 }
34615
34616 static struct {
34617 unsigned long address;
34618 unsigned short segment;
34619-} pci_indirect = { 0, __KERNEL_CS };
34620+} pci_indirect __read_only = { 0, __PCIBIOS_CS };
34621
34622-static int pci_bios_present;
34623+static int pci_bios_present __read_only;
34624
34625 static int __init check_pcibios(void)
34626 {
34627@@ -131,11 +174,13 @@ static int __init check_pcibios(void)
34628 unsigned long flags, pcibios_entry;
34629
34630 if ((pcibios_entry = bios32_service(PCI_SERVICE))) {
34631- pci_indirect.address = pcibios_entry + PAGE_OFFSET;
34632+ pci_indirect.address = pcibios_entry;
34633
34634 local_irq_save(flags);
34635- __asm__(
34636- "lcall *(%%edi); cld\n\t"
34637+ __asm__("movw %w6, %%ds\n\t"
34638+ "lcall *%%ss:(%%edi); cld\n\t"
34639+ "push %%ss\n\t"
34640+ "pop %%ds\n\t"
34641 "jc 1f\n\t"
34642 "xor %%ah, %%ah\n"
34643 "1:"
34644@@ -144,7 +189,8 @@ static int __init check_pcibios(void)
34645 "=b" (ebx),
34646 "=c" (ecx)
34647 : "1" (PCIBIOS_PCI_BIOS_PRESENT),
34648- "D" (&pci_indirect)
34649+ "D" (&pci_indirect),
34650+ "r" (__PCIBIOS_DS)
34651 : "memory");
34652 local_irq_restore(flags);
34653
34654@@ -189,7 +235,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
34655
34656 switch (len) {
34657 case 1:
34658- __asm__("lcall *(%%esi); cld\n\t"
34659+ __asm__("movw %w6, %%ds\n\t"
34660+ "lcall *%%ss:(%%esi); cld\n\t"
34661+ "push %%ss\n\t"
34662+ "pop %%ds\n\t"
34663 "jc 1f\n\t"
34664 "xor %%ah, %%ah\n"
34665 "1:"
34666@@ -198,7 +247,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
34667 : "1" (PCIBIOS_READ_CONFIG_BYTE),
34668 "b" (bx),
34669 "D" ((long)reg),
34670- "S" (&pci_indirect));
34671+ "S" (&pci_indirect),
34672+ "r" (__PCIBIOS_DS));
34673 /*
34674 * Zero-extend the result beyond 8 bits, do not trust the
34675 * BIOS having done it:
34676@@ -206,7 +256,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
34677 *value &= 0xff;
34678 break;
34679 case 2:
34680- __asm__("lcall *(%%esi); cld\n\t"
34681+ __asm__("movw %w6, %%ds\n\t"
34682+ "lcall *%%ss:(%%esi); cld\n\t"
34683+ "push %%ss\n\t"
34684+ "pop %%ds\n\t"
34685 "jc 1f\n\t"
34686 "xor %%ah, %%ah\n"
34687 "1:"
34688@@ -215,7 +268,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
34689 : "1" (PCIBIOS_READ_CONFIG_WORD),
34690 "b" (bx),
34691 "D" ((long)reg),
34692- "S" (&pci_indirect));
34693+ "S" (&pci_indirect),
34694+ "r" (__PCIBIOS_DS));
34695 /*
34696 * Zero-extend the result beyond 16 bits, do not trust the
34697 * BIOS having done it:
34698@@ -223,7 +277,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
34699 *value &= 0xffff;
34700 break;
34701 case 4:
34702- __asm__("lcall *(%%esi); cld\n\t"
34703+ __asm__("movw %w6, %%ds\n\t"
34704+ "lcall *%%ss:(%%esi); cld\n\t"
34705+ "push %%ss\n\t"
34706+ "pop %%ds\n\t"
34707 "jc 1f\n\t"
34708 "xor %%ah, %%ah\n"
34709 "1:"
34710@@ -232,7 +289,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
34711 : "1" (PCIBIOS_READ_CONFIG_DWORD),
34712 "b" (bx),
34713 "D" ((long)reg),
34714- "S" (&pci_indirect));
34715+ "S" (&pci_indirect),
34716+ "r" (__PCIBIOS_DS));
34717 break;
34718 }
34719
34720@@ -256,7 +314,10 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
34721
34722 switch (len) {
34723 case 1:
34724- __asm__("lcall *(%%esi); cld\n\t"
34725+ __asm__("movw %w6, %%ds\n\t"
34726+ "lcall *%%ss:(%%esi); cld\n\t"
34727+ "push %%ss\n\t"
34728+ "pop %%ds\n\t"
34729 "jc 1f\n\t"
34730 "xor %%ah, %%ah\n"
34731 "1:"
34732@@ -265,10 +326,14 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
34733 "c" (value),
34734 "b" (bx),
34735 "D" ((long)reg),
34736- "S" (&pci_indirect));
34737+ "S" (&pci_indirect),
34738+ "r" (__PCIBIOS_DS));
34739 break;
34740 case 2:
34741- __asm__("lcall *(%%esi); cld\n\t"
34742+ __asm__("movw %w6, %%ds\n\t"
34743+ "lcall *%%ss:(%%esi); cld\n\t"
34744+ "push %%ss\n\t"
34745+ "pop %%ds\n\t"
34746 "jc 1f\n\t"
34747 "xor %%ah, %%ah\n"
34748 "1:"
34749@@ -277,10 +342,14 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
34750 "c" (value),
34751 "b" (bx),
34752 "D" ((long)reg),
34753- "S" (&pci_indirect));
34754+ "S" (&pci_indirect),
34755+ "r" (__PCIBIOS_DS));
34756 break;
34757 case 4:
34758- __asm__("lcall *(%%esi); cld\n\t"
34759+ __asm__("movw %w6, %%ds\n\t"
34760+ "lcall *%%ss:(%%esi); cld\n\t"
34761+ "push %%ss\n\t"
34762+ "pop %%ds\n\t"
34763 "jc 1f\n\t"
34764 "xor %%ah, %%ah\n"
34765 "1:"
34766@@ -289,7 +358,8 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
34767 "c" (value),
34768 "b" (bx),
34769 "D" ((long)reg),
34770- "S" (&pci_indirect));
34771+ "S" (&pci_indirect),
34772+ "r" (__PCIBIOS_DS));
34773 break;
34774 }
34775
34776@@ -394,10 +464,13 @@ struct irq_routing_table * pcibios_get_irq_routing_table(void)
34777
34778 DBG("PCI: Fetching IRQ routing table... ");
34779 __asm__("push %%es\n\t"
34780+ "movw %w8, %%ds\n\t"
34781 "push %%ds\n\t"
34782 "pop %%es\n\t"
34783- "lcall *(%%esi); cld\n\t"
34784+ "lcall *%%ss:(%%esi); cld\n\t"
34785 "pop %%es\n\t"
34786+ "push %%ss\n\t"
34787+ "pop %%ds\n"
34788 "jc 1f\n\t"
34789 "xor %%ah, %%ah\n"
34790 "1:"
34791@@ -408,7 +481,8 @@ struct irq_routing_table * pcibios_get_irq_routing_table(void)
34792 "1" (0),
34793 "D" ((long) &opt),
34794 "S" (&pci_indirect),
34795- "m" (opt)
34796+ "m" (opt),
34797+ "r" (__PCIBIOS_DS)
34798 : "memory");
34799 DBG("OK ret=%d, size=%d, map=%x\n", ret, opt.size, map);
34800 if (ret & 0xff00)
34801@@ -432,7 +506,10 @@ int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq)
34802 {
34803 int ret;
34804
34805- __asm__("lcall *(%%esi); cld\n\t"
34806+ __asm__("movw %w5, %%ds\n\t"
34807+ "lcall *%%ss:(%%esi); cld\n\t"
34808+ "push %%ss\n\t"
34809+ "pop %%ds\n"
34810 "jc 1f\n\t"
34811 "xor %%ah, %%ah\n"
34812 "1:"
34813@@ -440,7 +517,8 @@ int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq)
34814 : "0" (PCIBIOS_SET_PCI_HW_INT),
34815 "b" ((dev->bus->number << 8) | dev->devfn),
34816 "c" ((irq << 8) | (pin + 10)),
34817- "S" (&pci_indirect));
34818+ "S" (&pci_indirect),
34819+ "r" (__PCIBIOS_DS));
34820 return !(ret & 0xff00);
34821 }
34822 EXPORT_SYMBOL(pcibios_set_irq_routing);
34823diff --git a/arch/x86/platform/efi/efi_32.c b/arch/x86/platform/efi/efi_32.c
34824index 40e7cda..c7e6672 100644
34825--- a/arch/x86/platform/efi/efi_32.c
34826+++ b/arch/x86/platform/efi/efi_32.c
34827@@ -61,11 +61,22 @@ void __init efi_call_phys_prolog(void)
34828 {
34829 struct desc_ptr gdt_descr;
34830
34831+#ifdef CONFIG_PAX_KERNEXEC
34832+ struct desc_struct d;
34833+#endif
34834+
34835 local_irq_save(efi_rt_eflags);
34836
34837 load_cr3(initial_page_table);
34838 __flush_tlb_all();
34839
34840+#ifdef CONFIG_PAX_KERNEXEC
34841+ pack_descriptor(&d, 0, 0xFFFFF, 0x9B, 0xC);
34842+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_CS, &d, DESCTYPE_S);
34843+ pack_descriptor(&d, 0, 0xFFFFF, 0x93, 0xC);
34844+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_DS, &d, DESCTYPE_S);
34845+#endif
34846+
34847 gdt_descr.address = __pa(get_cpu_gdt_table(0));
34848 gdt_descr.size = GDT_SIZE - 1;
34849 load_gdt(&gdt_descr);
34850@@ -75,11 +86,24 @@ void __init efi_call_phys_epilog(void)
34851 {
34852 struct desc_ptr gdt_descr;
34853
34854+#ifdef CONFIG_PAX_KERNEXEC
34855+ struct desc_struct d;
34856+
34857+ memset(&d, 0, sizeof d);
34858+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_CS, &d, DESCTYPE_S);
34859+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_DS, &d, DESCTYPE_S);
34860+#endif
34861+
34862 gdt_descr.address = (unsigned long)get_cpu_gdt_table(0);
34863 gdt_descr.size = GDT_SIZE - 1;
34864 load_gdt(&gdt_descr);
34865
34866+#ifdef CONFIG_PAX_PER_CPU_PGD
34867+ load_cr3(get_cpu_pgd(smp_processor_id(), kernel));
34868+#else
34869 load_cr3(swapper_pg_dir);
34870+#endif
34871+
34872 __flush_tlb_all();
34873
34874 local_irq_restore(efi_rt_eflags);
34875diff --git a/arch/x86/platform/efi/efi_64.c b/arch/x86/platform/efi/efi_64.c
34876index 17e80d8..9fa6e41 100644
34877--- a/arch/x86/platform/efi/efi_64.c
34878+++ b/arch/x86/platform/efi/efi_64.c
34879@@ -98,6 +98,11 @@ void __init efi_call_phys_prolog(void)
34880 vaddress = (unsigned long)__va(pgd * PGDIR_SIZE);
34881 set_pgd(pgd_offset_k(pgd * PGDIR_SIZE), *pgd_offset_k(vaddress));
34882 }
34883+
34884+#ifdef CONFIG_PAX_PER_CPU_PGD
34885+ load_cr3(swapper_pg_dir);
34886+#endif
34887+
34888 __flush_tlb_all();
34889 }
34890
34891@@ -115,6 +120,11 @@ void __init efi_call_phys_epilog(void)
34892 for (pgd = 0; pgd < n_pgds; pgd++)
34893 set_pgd(pgd_offset_k(pgd * PGDIR_SIZE), save_pgd[pgd]);
34894 kfree(save_pgd);
34895+
34896+#ifdef CONFIG_PAX_PER_CPU_PGD
34897+ load_cr3(get_cpu_pgd(smp_processor_id(), kernel));
34898+#endif
34899+
34900 __flush_tlb_all();
34901 local_irq_restore(efi_flags);
34902 early_code_mapping_set_exec(0);
34903@@ -145,8 +155,23 @@ int __init efi_setup_page_tables(unsigned long pa_memmap, unsigned num_pages)
34904 unsigned npages;
34905 pgd_t *pgd;
34906
34907- if (efi_enabled(EFI_OLD_MEMMAP))
34908+ if (efi_enabled(EFI_OLD_MEMMAP)) {
34909+ /* PaX: We need to disable the NX bit in the PGD, otherwise we won't be
34910+ * able to execute the EFI services.
34911+ */
34912+ if (__supported_pte_mask & _PAGE_NX) {
34913+ unsigned long addr = (unsigned long) __va(0);
34914+ pgd_t pe = __pgd(pgd_val(*pgd_offset_k(addr)) & ~_PAGE_NX);
34915+
34916+ pr_alert("PAX: Disabling NX protection for low memory map. Try booting without \"efi=old_map\"\n");
34917+#ifdef CONFIG_PAX_PER_CPU_PGD
34918+ set_pgd(pgd_offset_cpu(0, kernel, addr), pe);
34919+#endif
34920+ set_pgd(pgd_offset_k(addr), pe);
34921+ }
34922+
34923 return 0;
34924+ }
34925
34926 efi_scratch.efi_pgt = (pgd_t *)(unsigned long)real_mode_header->trampoline_pgd;
34927 pgd = __va(efi_scratch.efi_pgt);
34928diff --git a/arch/x86/platform/efi/efi_stub_32.S b/arch/x86/platform/efi/efi_stub_32.S
34929index 040192b..7d3300f 100644
34930--- a/arch/x86/platform/efi/efi_stub_32.S
34931+++ b/arch/x86/platform/efi/efi_stub_32.S
34932@@ -6,7 +6,9 @@
34933 */
34934
34935 #include <linux/linkage.h>
34936+#include <linux/init.h>
34937 #include <asm/page_types.h>
34938+#include <asm/segment.h>
34939
34940 /*
34941 * efi_call_phys(void *, ...) is a function with variable parameters.
34942@@ -20,7 +22,7 @@
34943 * service functions will comply with gcc calling convention, too.
34944 */
34945
34946-.text
34947+__INIT
34948 ENTRY(efi_call_phys)
34949 /*
34950 * 0. The function can only be called in Linux kernel. So CS has been
34951@@ -36,10 +38,24 @@ ENTRY(efi_call_phys)
34952 * The mapping of lower virtual memory has been created in prolog and
34953 * epilog.
34954 */
34955- movl $1f, %edx
34956- subl $__PAGE_OFFSET, %edx
34957- jmp *%edx
34958+#ifdef CONFIG_PAX_KERNEXEC
34959+ movl $(__KERNEXEC_EFI_DS), %edx
34960+ mov %edx, %ds
34961+ mov %edx, %es
34962+ mov %edx, %ss
34963+ addl $2f,(1f)
34964+ ljmp *(1f)
34965+
34966+__INITDATA
34967+1: .long __LOAD_PHYSICAL_ADDR, __KERNEXEC_EFI_CS
34968+.previous
34969+
34970+2:
34971+ subl $2b,(1b)
34972+#else
34973+ jmp 1f-__PAGE_OFFSET
34974 1:
34975+#endif
34976
34977 /*
34978 * 2. Now on the top of stack is the return
34979@@ -47,14 +63,8 @@ ENTRY(efi_call_phys)
34980 * parameter 2, ..., param n. To make things easy, we save the return
34981 * address of efi_call_phys in a global variable.
34982 */
34983- popl %edx
34984- movl %edx, saved_return_addr
34985- /* get the function pointer into ECX*/
34986- popl %ecx
34987- movl %ecx, efi_rt_function_ptr
34988- movl $2f, %edx
34989- subl $__PAGE_OFFSET, %edx
34990- pushl %edx
34991+ popl (saved_return_addr)
34992+ popl (efi_rt_function_ptr)
34993
34994 /*
34995 * 3. Clear PG bit in %CR0.
34996@@ -73,9 +83,8 @@ ENTRY(efi_call_phys)
34997 /*
34998 * 5. Call the physical function.
34999 */
35000- jmp *%ecx
35001+ call *(efi_rt_function_ptr-__PAGE_OFFSET)
35002
35003-2:
35004 /*
35005 * 6. After EFI runtime service returns, control will return to
35006 * following instruction. We'd better readjust stack pointer first.
35007@@ -88,35 +97,36 @@ ENTRY(efi_call_phys)
35008 movl %cr0, %edx
35009 orl $0x80000000, %edx
35010 movl %edx, %cr0
35011- jmp 1f
35012-1:
35013+
35014 /*
35015 * 8. Now restore the virtual mode from flat mode by
35016 * adding EIP with PAGE_OFFSET.
35017 */
35018- movl $1f, %edx
35019- jmp *%edx
35020+#ifdef CONFIG_PAX_KERNEXEC
35021+ movl $(__KERNEL_DS), %edx
35022+ mov %edx, %ds
35023+ mov %edx, %es
35024+ mov %edx, %ss
35025+ ljmp $(__KERNEL_CS),$1f
35026+#else
35027+ jmp 1f+__PAGE_OFFSET
35028+#endif
35029 1:
35030
35031 /*
35032 * 9. Balance the stack. And because EAX contain the return value,
35033 * we'd better not clobber it.
35034 */
35035- leal efi_rt_function_ptr, %edx
35036- movl (%edx), %ecx
35037- pushl %ecx
35038+ pushl (efi_rt_function_ptr)
35039
35040 /*
35041- * 10. Push the saved return address onto the stack and return.
35042+ * 10. Return to the saved return address.
35043 */
35044- leal saved_return_addr, %edx
35045- movl (%edx), %ecx
35046- pushl %ecx
35047- ret
35048+ jmpl *(saved_return_addr)
35049 ENDPROC(efi_call_phys)
35050 .previous
35051
35052-.data
35053+__INITDATA
35054 saved_return_addr:
35055 .long 0
35056 efi_rt_function_ptr:
35057diff --git a/arch/x86/platform/efi/efi_stub_64.S b/arch/x86/platform/efi/efi_stub_64.S
35058index 86d0f9e..6d499f4 100644
35059--- a/arch/x86/platform/efi/efi_stub_64.S
35060+++ b/arch/x86/platform/efi/efi_stub_64.S
35061@@ -11,6 +11,7 @@
35062 #include <asm/msr.h>
35063 #include <asm/processor-flags.h>
35064 #include <asm/page_types.h>
35065+#include <asm/alternative-asm.h>
35066
35067 #define SAVE_XMM \
35068 mov %rsp, %rax; \
35069@@ -88,6 +89,7 @@ ENTRY(efi_call)
35070 RESTORE_PGT
35071 addq $48, %rsp
35072 RESTORE_XMM
35073+ pax_force_retaddr 0, 1
35074 ret
35075 ENDPROC(efi_call)
35076
35077diff --git a/arch/x86/platform/intel-mid/intel-mid.c b/arch/x86/platform/intel-mid/intel-mid.c
35078index 3005f0c..d06aeb0 100644
35079--- a/arch/x86/platform/intel-mid/intel-mid.c
35080+++ b/arch/x86/platform/intel-mid/intel-mid.c
35081@@ -63,7 +63,7 @@ enum intel_mid_timer_options intel_mid_timer_options;
35082 /* intel_mid_ops to store sub arch ops */
35083 struct intel_mid_ops *intel_mid_ops;
35084 /* getter function for sub arch ops*/
35085-static void *(*get_intel_mid_ops[])(void) = INTEL_MID_OPS_INIT;
35086+static const void *(*get_intel_mid_ops[])(void) = INTEL_MID_OPS_INIT;
35087 enum intel_mid_cpu_type __intel_mid_cpu_chip;
35088 EXPORT_SYMBOL_GPL(__intel_mid_cpu_chip);
35089
35090@@ -71,9 +71,10 @@ static void intel_mid_power_off(void)
35091 {
35092 };
35093
35094-static void intel_mid_reboot(void)
35095+static void __noreturn intel_mid_reboot(void)
35096 {
35097 intel_scu_ipc_simple_command(IPCMSG_COLD_BOOT, 0);
35098+ BUG();
35099 }
35100
35101 static unsigned long __init intel_mid_calibrate_tsc(void)
35102diff --git a/arch/x86/platform/intel-mid/intel_mid_weak_decls.h b/arch/x86/platform/intel-mid/intel_mid_weak_decls.h
35103index 3c1c386..59a68ed 100644
35104--- a/arch/x86/platform/intel-mid/intel_mid_weak_decls.h
35105+++ b/arch/x86/platform/intel-mid/intel_mid_weak_decls.h
35106@@ -13,6 +13,6 @@
35107 /* For every CPU addition a new get_<cpuname>_ops interface needs
35108 * to be added.
35109 */
35110-extern void *get_penwell_ops(void);
35111-extern void *get_cloverview_ops(void);
35112-extern void *get_tangier_ops(void);
35113+extern const void *get_penwell_ops(void);
35114+extern const void *get_cloverview_ops(void);
35115+extern const void *get_tangier_ops(void);
35116diff --git a/arch/x86/platform/intel-mid/mfld.c b/arch/x86/platform/intel-mid/mfld.c
35117index 23381d2..8ddc10e 100644
35118--- a/arch/x86/platform/intel-mid/mfld.c
35119+++ b/arch/x86/platform/intel-mid/mfld.c
35120@@ -64,12 +64,12 @@ static void __init penwell_arch_setup(void)
35121 pm_power_off = mfld_power_off;
35122 }
35123
35124-void *get_penwell_ops(void)
35125+const void *get_penwell_ops(void)
35126 {
35127 return &penwell_ops;
35128 }
35129
35130-void *get_cloverview_ops(void)
35131+const void *get_cloverview_ops(void)
35132 {
35133 return &penwell_ops;
35134 }
35135diff --git a/arch/x86/platform/intel-mid/mrfl.c b/arch/x86/platform/intel-mid/mrfl.c
35136index aaca917..66eadbc 100644
35137--- a/arch/x86/platform/intel-mid/mrfl.c
35138+++ b/arch/x86/platform/intel-mid/mrfl.c
35139@@ -97,7 +97,7 @@ static struct intel_mid_ops tangier_ops = {
35140 .arch_setup = tangier_arch_setup,
35141 };
35142
35143-void *get_tangier_ops(void)
35144+const void *get_tangier_ops(void)
35145 {
35146 return &tangier_ops;
35147 }
35148diff --git a/arch/x86/platform/intel-quark/imr_selftest.c b/arch/x86/platform/intel-quark/imr_selftest.c
35149index c9a0838..fae0977 100644
35150--- a/arch/x86/platform/intel-quark/imr_selftest.c
35151+++ b/arch/x86/platform/intel-quark/imr_selftest.c
35152@@ -54,7 +54,7 @@ static void __init imr_self_test_result(int res, const char *fmt, ...)
35153 */
35154 static void __init imr_self_test(void)
35155 {
35156- phys_addr_t base = virt_to_phys(&_text);
35157+ phys_addr_t base = virt_to_phys(ktla_ktva(_text));
35158 size_t size = virt_to_phys(&__end_rodata) - base;
35159 const char *fmt_over = "overlapped IMR @ (0x%08lx - 0x%08lx)\n";
35160 int ret;
35161diff --git a/arch/x86/platform/olpc/olpc_dt.c b/arch/x86/platform/olpc/olpc_dt.c
35162index d6ee929..3637cb5 100644
35163--- a/arch/x86/platform/olpc/olpc_dt.c
35164+++ b/arch/x86/platform/olpc/olpc_dt.c
35165@@ -156,7 +156,7 @@ void * __init prom_early_alloc(unsigned long size)
35166 return res;
35167 }
35168
35169-static struct of_pdt_ops prom_olpc_ops __initdata = {
35170+static struct of_pdt_ops prom_olpc_ops __initconst = {
35171 .nextprop = olpc_dt_nextprop,
35172 .getproplen = olpc_dt_getproplen,
35173 .getproperty = olpc_dt_getproperty,
35174diff --git a/arch/x86/power/cpu.c b/arch/x86/power/cpu.c
35175index 3e32ed5..cc0adc5 100644
35176--- a/arch/x86/power/cpu.c
35177+++ b/arch/x86/power/cpu.c
35178@@ -134,11 +134,8 @@ static void do_fpu_end(void)
35179 static void fix_processor_context(void)
35180 {
35181 int cpu = smp_processor_id();
35182- struct tss_struct *t = &per_cpu(init_tss, cpu);
35183-#ifdef CONFIG_X86_64
35184- struct desc_struct *desc = get_cpu_gdt_table(cpu);
35185- tss_desc tss;
35186-#endif
35187+ struct tss_struct *t = init_tss + cpu;
35188+
35189 set_tss_desc(cpu, t); /*
35190 * This just modifies memory; should not be
35191 * necessary. But... This is necessary, because
35192@@ -147,10 +144,6 @@ static void fix_processor_context(void)
35193 */
35194
35195 #ifdef CONFIG_X86_64
35196- memcpy(&tss, &desc[GDT_ENTRY_TSS], sizeof(tss_desc));
35197- tss.type = 0x9; /* The available 64-bit TSS (see AMD vol 2, pg 91 */
35198- write_gdt_entry(desc, GDT_ENTRY_TSS, &tss, DESC_TSS);
35199-
35200 syscall_init(); /* This sets MSR_*STAR and related */
35201 #endif
35202 load_TR_desc(); /* This does ltr */
35203diff --git a/arch/x86/realmode/init.c b/arch/x86/realmode/init.c
35204index 0b7a63d..0d0f2c2 100644
35205--- a/arch/x86/realmode/init.c
35206+++ b/arch/x86/realmode/init.c
35207@@ -68,7 +68,13 @@ void __init setup_real_mode(void)
35208 __va(real_mode_header->trampoline_header);
35209
35210 #ifdef CONFIG_X86_32
35211- trampoline_header->start = __pa_symbol(startup_32_smp);
35212+ trampoline_header->start = __pa_symbol(ktla_ktva(startup_32_smp));
35213+
35214+#ifdef CONFIG_PAX_KERNEXEC
35215+ trampoline_header->start -= LOAD_PHYSICAL_ADDR;
35216+#endif
35217+
35218+ trampoline_header->boot_cs = __BOOT_CS;
35219 trampoline_header->gdt_limit = __BOOT_DS + 7;
35220 trampoline_header->gdt_base = __pa_symbol(boot_gdt);
35221 #else
35222@@ -84,7 +90,7 @@ void __init setup_real_mode(void)
35223 *trampoline_cr4_features = __read_cr4();
35224
35225 trampoline_pgd = (u64 *) __va(real_mode_header->trampoline_pgd);
35226- trampoline_pgd[0] = init_level4_pgt[pgd_index(__PAGE_OFFSET)].pgd;
35227+ trampoline_pgd[0] = init_level4_pgt[pgd_index(__PAGE_OFFSET)].pgd & ~_PAGE_NX;
35228 trampoline_pgd[511] = init_level4_pgt[511].pgd;
35229 #endif
35230 }
35231diff --git a/arch/x86/realmode/rm/Makefile b/arch/x86/realmode/rm/Makefile
35232index 2730d77..2e4cd19 100644
35233--- a/arch/x86/realmode/rm/Makefile
35234+++ b/arch/x86/realmode/rm/Makefile
35235@@ -68,5 +68,8 @@ $(obj)/realmode.relocs: $(obj)/realmode.elf FORCE
35236
35237 KBUILD_CFLAGS := $(LINUXINCLUDE) $(REALMODE_CFLAGS) -D_SETUP -D_WAKEUP \
35238 -I$(srctree)/arch/x86/boot
35239+ifdef CONSTIFY_PLUGIN
35240+KBUILD_CFLAGS += -fplugin-arg-constify_plugin-no-constify
35241+endif
35242 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
35243 GCOV_PROFILE := n
35244diff --git a/arch/x86/realmode/rm/header.S b/arch/x86/realmode/rm/header.S
35245index a28221d..93c40f1 100644
35246--- a/arch/x86/realmode/rm/header.S
35247+++ b/arch/x86/realmode/rm/header.S
35248@@ -30,7 +30,9 @@ GLOBAL(real_mode_header)
35249 #endif
35250 /* APM/BIOS reboot */
35251 .long pa_machine_real_restart_asm
35252-#ifdef CONFIG_X86_64
35253+#ifdef CONFIG_X86_32
35254+ .long __KERNEL_CS
35255+#else
35256 .long __KERNEL32_CS
35257 #endif
35258 END(real_mode_header)
35259diff --git a/arch/x86/realmode/rm/reboot.S b/arch/x86/realmode/rm/reboot.S
35260index d66c607..3def845 100644
35261--- a/arch/x86/realmode/rm/reboot.S
35262+++ b/arch/x86/realmode/rm/reboot.S
35263@@ -27,6 +27,10 @@ ENTRY(machine_real_restart_asm)
35264 lgdtl pa_tr_gdt
35265
35266 /* Disable paging to drop us out of long mode */
35267+ movl %cr4, %eax
35268+ andl $~X86_CR4_PCIDE, %eax
35269+ movl %eax, %cr4
35270+
35271 movl %cr0, %eax
35272 andl $~X86_CR0_PG, %eax
35273 movl %eax, %cr0
35274diff --git a/arch/x86/realmode/rm/trampoline_32.S b/arch/x86/realmode/rm/trampoline_32.S
35275index 48ddd76..c26749f 100644
35276--- a/arch/x86/realmode/rm/trampoline_32.S
35277+++ b/arch/x86/realmode/rm/trampoline_32.S
35278@@ -24,6 +24,12 @@
35279 #include <asm/page_types.h>
35280 #include "realmode.h"
35281
35282+#ifdef CONFIG_PAX_KERNEXEC
35283+#define ta(X) (X)
35284+#else
35285+#define ta(X) (pa_ ## X)
35286+#endif
35287+
35288 .text
35289 .code16
35290
35291@@ -38,8 +44,6 @@ ENTRY(trampoline_start)
35292
35293 cli # We should be safe anyway
35294
35295- movl tr_start, %eax # where we need to go
35296-
35297 movl $0xA5A5A5A5, trampoline_status
35298 # write marker for master knows we're running
35299
35300@@ -55,7 +59,7 @@ ENTRY(trampoline_start)
35301 movw $1, %dx # protected mode (PE) bit
35302 lmsw %dx # into protected mode
35303
35304- ljmpl $__BOOT_CS, $pa_startup_32
35305+ ljmpl *(trampoline_header)
35306
35307 .section ".text32","ax"
35308 .code32
35309@@ -66,7 +70,7 @@ ENTRY(startup_32) # note: also used from wakeup_asm.S
35310 .balign 8
35311 GLOBAL(trampoline_header)
35312 tr_start: .space 4
35313- tr_gdt_pad: .space 2
35314+ tr_boot_cs: .space 2
35315 tr_gdt: .space 6
35316 END(trampoline_header)
35317
35318diff --git a/arch/x86/realmode/rm/trampoline_64.S b/arch/x86/realmode/rm/trampoline_64.S
35319index dac7b20..72dbaca 100644
35320--- a/arch/x86/realmode/rm/trampoline_64.S
35321+++ b/arch/x86/realmode/rm/trampoline_64.S
35322@@ -93,6 +93,7 @@ ENTRY(startup_32)
35323 movl %edx, %gs
35324
35325 movl pa_tr_cr4, %eax
35326+ andl $~X86_CR4_PCIDE, %eax
35327 movl %eax, %cr4 # Enable PAE mode
35328
35329 # Setup trampoline 4 level pagetables
35330@@ -106,7 +107,7 @@ ENTRY(startup_32)
35331 wrmsr
35332
35333 # Enable paging and in turn activate Long Mode
35334- movl $(X86_CR0_PG | X86_CR0_WP | X86_CR0_PE), %eax
35335+ movl $(X86_CR0_PG | X86_CR0_PE), %eax
35336 movl %eax, %cr0
35337
35338 /*
35339diff --git a/arch/x86/realmode/rm/wakeup_asm.S b/arch/x86/realmode/rm/wakeup_asm.S
35340index 9e7e147..25a4158 100644
35341--- a/arch/x86/realmode/rm/wakeup_asm.S
35342+++ b/arch/x86/realmode/rm/wakeup_asm.S
35343@@ -126,11 +126,10 @@ ENTRY(wakeup_start)
35344 lgdtl pmode_gdt
35345
35346 /* This really couldn't... */
35347- movl pmode_entry, %eax
35348 movl pmode_cr0, %ecx
35349 movl %ecx, %cr0
35350- ljmpl $__KERNEL_CS, $pa_startup_32
35351- /* -> jmp *%eax in trampoline_32.S */
35352+
35353+ ljmpl *pmode_entry
35354 #else
35355 jmp trampoline_start
35356 #endif
35357diff --git a/arch/x86/tools/Makefile b/arch/x86/tools/Makefile
35358index 604a37e..e49702a 100644
35359--- a/arch/x86/tools/Makefile
35360+++ b/arch/x86/tools/Makefile
35361@@ -37,7 +37,7 @@ $(obj)/test_get_len.o: $(srctree)/arch/x86/lib/insn.c $(srctree)/arch/x86/lib/in
35362
35363 $(obj)/insn_sanity.o: $(srctree)/arch/x86/lib/insn.c $(srctree)/arch/x86/lib/inat.c $(srctree)/arch/x86/include/asm/inat_types.h $(srctree)/arch/x86/include/asm/inat.h $(srctree)/arch/x86/include/asm/insn.h $(objtree)/arch/x86/lib/inat-tables.c
35364
35365-HOST_EXTRACFLAGS += -I$(srctree)/tools/include
35366+HOST_EXTRACFLAGS += -I$(srctree)/tools/include -ggdb
35367 hostprogs-y += relocs
35368 relocs-objs := relocs_32.o relocs_64.o relocs_common.o
35369 PHONY += relocs
35370diff --git a/arch/x86/tools/relocs.c b/arch/x86/tools/relocs.c
35371index 0c2fae8..88036b7 100644
35372--- a/arch/x86/tools/relocs.c
35373+++ b/arch/x86/tools/relocs.c
35374@@ -1,5 +1,7 @@
35375 /* This is included from relocs_32/64.c */
35376
35377+#include "../../../include/generated/autoconf.h"
35378+
35379 #define ElfW(type) _ElfW(ELF_BITS, type)
35380 #define _ElfW(bits, type) __ElfW(bits, type)
35381 #define __ElfW(bits, type) Elf##bits##_##type
35382@@ -11,6 +13,7 @@
35383 #define Elf_Sym ElfW(Sym)
35384
35385 static Elf_Ehdr ehdr;
35386+static Elf_Phdr *phdr;
35387
35388 struct relocs {
35389 uint32_t *offset;
35390@@ -386,9 +389,39 @@ static void read_ehdr(FILE *fp)
35391 }
35392 }
35393
35394+static void read_phdrs(FILE *fp)
35395+{
35396+ unsigned int i;
35397+
35398+ phdr = calloc(ehdr.e_phnum, sizeof(Elf_Phdr));
35399+ if (!phdr) {
35400+ die("Unable to allocate %d program headers\n",
35401+ ehdr.e_phnum);
35402+ }
35403+ if (fseek(fp, ehdr.e_phoff, SEEK_SET) < 0) {
35404+ die("Seek to %d failed: %s\n",
35405+ ehdr.e_phoff, strerror(errno));
35406+ }
35407+ if (fread(phdr, sizeof(*phdr), ehdr.e_phnum, fp) != ehdr.e_phnum) {
35408+ die("Cannot read ELF program headers: %s\n",
35409+ strerror(errno));
35410+ }
35411+ for(i = 0; i < ehdr.e_phnum; i++) {
35412+ phdr[i].p_type = elf_word_to_cpu(phdr[i].p_type);
35413+ phdr[i].p_offset = elf_off_to_cpu(phdr[i].p_offset);
35414+ phdr[i].p_vaddr = elf_addr_to_cpu(phdr[i].p_vaddr);
35415+ phdr[i].p_paddr = elf_addr_to_cpu(phdr[i].p_paddr);
35416+ phdr[i].p_filesz = elf_word_to_cpu(phdr[i].p_filesz);
35417+ phdr[i].p_memsz = elf_word_to_cpu(phdr[i].p_memsz);
35418+ phdr[i].p_flags = elf_word_to_cpu(phdr[i].p_flags);
35419+ phdr[i].p_align = elf_word_to_cpu(phdr[i].p_align);
35420+ }
35421+
35422+}
35423+
35424 static void read_shdrs(FILE *fp)
35425 {
35426- int i;
35427+ unsigned int i;
35428 Elf_Shdr shdr;
35429
35430 secs = calloc(ehdr.e_shnum, sizeof(struct section));
35431@@ -423,7 +456,7 @@ static void read_shdrs(FILE *fp)
35432
35433 static void read_strtabs(FILE *fp)
35434 {
35435- int i;
35436+ unsigned int i;
35437 for (i = 0; i < ehdr.e_shnum; i++) {
35438 struct section *sec = &secs[i];
35439 if (sec->shdr.sh_type != SHT_STRTAB) {
35440@@ -448,7 +481,7 @@ static void read_strtabs(FILE *fp)
35441
35442 static void read_symtabs(FILE *fp)
35443 {
35444- int i,j;
35445+ unsigned int i,j;
35446 for (i = 0; i < ehdr.e_shnum; i++) {
35447 struct section *sec = &secs[i];
35448 if (sec->shdr.sh_type != SHT_SYMTAB) {
35449@@ -479,9 +512,11 @@ static void read_symtabs(FILE *fp)
35450 }
35451
35452
35453-static void read_relocs(FILE *fp)
35454+static void read_relocs(FILE *fp, int use_real_mode)
35455 {
35456- int i,j;
35457+ unsigned int i,j;
35458+ uint32_t base;
35459+
35460 for (i = 0; i < ehdr.e_shnum; i++) {
35461 struct section *sec = &secs[i];
35462 if (sec->shdr.sh_type != SHT_REL_TYPE) {
35463@@ -501,9 +536,22 @@ static void read_relocs(FILE *fp)
35464 die("Cannot read symbol table: %s\n",
35465 strerror(errno));
35466 }
35467+ base = 0;
35468+
35469+#ifdef CONFIG_X86_32
35470+ for (j = 0; !use_real_mode && j < ehdr.e_phnum; j++) {
35471+ if (phdr[j].p_type != PT_LOAD )
35472+ continue;
35473+ if (secs[sec->shdr.sh_info].shdr.sh_offset < phdr[j].p_offset || secs[sec->shdr.sh_info].shdr.sh_offset >= phdr[j].p_offset + phdr[j].p_filesz)
35474+ continue;
35475+ base = CONFIG_PAGE_OFFSET + phdr[j].p_paddr - phdr[j].p_vaddr;
35476+ break;
35477+ }
35478+#endif
35479+
35480 for (j = 0; j < sec->shdr.sh_size/sizeof(Elf_Rel); j++) {
35481 Elf_Rel *rel = &sec->reltab[j];
35482- rel->r_offset = elf_addr_to_cpu(rel->r_offset);
35483+ rel->r_offset = elf_addr_to_cpu(rel->r_offset) + base;
35484 rel->r_info = elf_xword_to_cpu(rel->r_info);
35485 #if (SHT_REL_TYPE == SHT_RELA)
35486 rel->r_addend = elf_xword_to_cpu(rel->r_addend);
35487@@ -515,7 +563,7 @@ static void read_relocs(FILE *fp)
35488
35489 static void print_absolute_symbols(void)
35490 {
35491- int i;
35492+ unsigned int i;
35493 const char *format;
35494
35495 if (ELF_BITS == 64)
35496@@ -528,7 +576,7 @@ static void print_absolute_symbols(void)
35497 for (i = 0; i < ehdr.e_shnum; i++) {
35498 struct section *sec = &secs[i];
35499 char *sym_strtab;
35500- int j;
35501+ unsigned int j;
35502
35503 if (sec->shdr.sh_type != SHT_SYMTAB) {
35504 continue;
35505@@ -555,7 +603,7 @@ static void print_absolute_symbols(void)
35506
35507 static void print_absolute_relocs(void)
35508 {
35509- int i, printed = 0;
35510+ unsigned int i, printed = 0;
35511 const char *format;
35512
35513 if (ELF_BITS == 64)
35514@@ -568,7 +616,7 @@ static void print_absolute_relocs(void)
35515 struct section *sec_applies, *sec_symtab;
35516 char *sym_strtab;
35517 Elf_Sym *sh_symtab;
35518- int j;
35519+ unsigned int j;
35520 if (sec->shdr.sh_type != SHT_REL_TYPE) {
35521 continue;
35522 }
35523@@ -645,13 +693,13 @@ static void add_reloc(struct relocs *r, uint32_t offset)
35524 static void walk_relocs(int (*process)(struct section *sec, Elf_Rel *rel,
35525 Elf_Sym *sym, const char *symname))
35526 {
35527- int i;
35528+ unsigned int i;
35529 /* Walk through the relocations */
35530 for (i = 0; i < ehdr.e_shnum; i++) {
35531 char *sym_strtab;
35532 Elf_Sym *sh_symtab;
35533 struct section *sec_applies, *sec_symtab;
35534- int j;
35535+ unsigned int j;
35536 struct section *sec = &secs[i];
35537
35538 if (sec->shdr.sh_type != SHT_REL_TYPE) {
35539@@ -830,6 +878,23 @@ static int do_reloc32(struct section *sec, Elf_Rel *rel, Elf_Sym *sym,
35540 {
35541 unsigned r_type = ELF32_R_TYPE(rel->r_info);
35542 int shn_abs = (sym->st_shndx == SHN_ABS) && !is_reloc(S_REL, symname);
35543+ char *sym_strtab = sec->link->link->strtab;
35544+
35545+ /* Don't relocate actual per-cpu variables, they are absolute indices, not addresses */
35546+ if (!strcmp(sec_name(sym->st_shndx), ".data..percpu") && strcmp(sym_name(sym_strtab, sym), "__per_cpu_load"))
35547+ return 0;
35548+
35549+#ifdef CONFIG_PAX_KERNEXEC
35550+ /* Don't relocate actual code, they are relocated implicitly by the base address of KERNEL_CS */
35551+ if (!strcmp(sec_name(sym->st_shndx), ".text.end") && !strcmp(sym_name(sym_strtab, sym), "_etext"))
35552+ return 0;
35553+ if (!strcmp(sec_name(sym->st_shndx), ".init.text"))
35554+ return 0;
35555+ if (!strcmp(sec_name(sym->st_shndx), ".exit.text"))
35556+ return 0;
35557+ if (!strcmp(sec_name(sym->st_shndx), ".text") && strcmp(sym_name(sym_strtab, sym), "__LOAD_PHYSICAL_ADDR"))
35558+ return 0;
35559+#endif
35560
35561 switch (r_type) {
35562 case R_386_NONE:
35563@@ -968,7 +1033,7 @@ static int write32_as_text(uint32_t v, FILE *f)
35564
35565 static void emit_relocs(int as_text, int use_real_mode)
35566 {
35567- int i;
35568+ unsigned int i;
35569 int (*write_reloc)(uint32_t, FILE *) = write32;
35570 int (*do_reloc)(struct section *sec, Elf_Rel *rel, Elf_Sym *sym,
35571 const char *symname);
35572@@ -1078,10 +1143,11 @@ void process(FILE *fp, int use_real_mode, int as_text,
35573 {
35574 regex_init(use_real_mode);
35575 read_ehdr(fp);
35576+ read_phdrs(fp);
35577 read_shdrs(fp);
35578 read_strtabs(fp);
35579 read_symtabs(fp);
35580- read_relocs(fp);
35581+ read_relocs(fp, use_real_mode);
35582 if (ELF_BITS == 64)
35583 percpu_init();
35584 if (show_absolute_syms) {
35585diff --git a/arch/x86/um/mem_32.c b/arch/x86/um/mem_32.c
35586index f40281e..92728c9 100644
35587--- a/arch/x86/um/mem_32.c
35588+++ b/arch/x86/um/mem_32.c
35589@@ -21,7 +21,7 @@ static int __init gate_vma_init(void)
35590 gate_vma.vm_start = FIXADDR_USER_START;
35591 gate_vma.vm_end = FIXADDR_USER_END;
35592 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
35593- gate_vma.vm_page_prot = __P101;
35594+ gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
35595
35596 return 0;
35597 }
35598diff --git a/arch/x86/um/tls_32.c b/arch/x86/um/tls_32.c
35599index 80ffa5b..a33bd15 100644
35600--- a/arch/x86/um/tls_32.c
35601+++ b/arch/x86/um/tls_32.c
35602@@ -260,7 +260,7 @@ out:
35603 if (unlikely(task == current &&
35604 !t->arch.tls_array[idx - GDT_ENTRY_TLS_MIN].flushed)) {
35605 printk(KERN_ERR "get_tls_entry: task with pid %d got here "
35606- "without flushed TLS.", current->pid);
35607+ "without flushed TLS.", task_pid_nr(current));
35608 }
35609
35610 return 0;
35611diff --git a/arch/x86/vdso/Makefile b/arch/x86/vdso/Makefile
35612index 7b9be98..39bb57f 100644
35613--- a/arch/x86/vdso/Makefile
35614+++ b/arch/x86/vdso/Makefile
35615@@ -175,7 +175,7 @@ quiet_cmd_vdso = VDSO $@
35616 -Wl,-T,$(filter %.lds,$^) $(filter %.o,$^) && \
35617 sh $(srctree)/$(src)/checkundef.sh '$(NM)' '$@'
35618
35619-VDSO_LDFLAGS = -fPIC -shared $(call cc-ldoption, -Wl$(comma)--hash-style=sysv) \
35620+VDSO_LDFLAGS = -fPIC -shared -Wl,--no-undefined $(call cc-ldoption, -Wl$(comma)--hash-style=sysv) \
35621 $(call cc-ldoption, -Wl$(comma)--build-id) -Wl,-Bsymbolic $(LTO_CFLAGS)
35622 GCOV_PROFILE := n
35623
35624diff --git a/arch/x86/vdso/vdso2c.h b/arch/x86/vdso/vdso2c.h
35625index 0224987..c7d65a5 100644
35626--- a/arch/x86/vdso/vdso2c.h
35627+++ b/arch/x86/vdso/vdso2c.h
35628@@ -12,7 +12,7 @@ static void BITSFUNC(go)(void *raw_addr, size_t raw_len,
35629 unsigned long load_size = -1; /* Work around bogus warning */
35630 unsigned long mapping_size;
35631 ELF(Ehdr) *hdr = (ELF(Ehdr) *)raw_addr;
35632- int i;
35633+ unsigned int i;
35634 unsigned long j;
35635 ELF(Shdr) *symtab_hdr = NULL, *strtab_hdr, *secstrings_hdr,
35636 *alt_sec = NULL;
35637diff --git a/arch/x86/vdso/vdso32-setup.c b/arch/x86/vdso/vdso32-setup.c
35638index e904c27..b9eaa03 100644
35639--- a/arch/x86/vdso/vdso32-setup.c
35640+++ b/arch/x86/vdso/vdso32-setup.c
35641@@ -14,6 +14,7 @@
35642 #include <asm/cpufeature.h>
35643 #include <asm/processor.h>
35644 #include <asm/vdso.h>
35645+#include <asm/mman.h>
35646
35647 #ifdef CONFIG_COMPAT_VDSO
35648 #define VDSO_DEFAULT 0
35649diff --git a/arch/x86/vdso/vma.c b/arch/x86/vdso/vma.c
35650index 1c9f750..cfddb1a 100644
35651--- a/arch/x86/vdso/vma.c
35652+++ b/arch/x86/vdso/vma.c
35653@@ -19,10 +19,7 @@
35654 #include <asm/page.h>
35655 #include <asm/hpet.h>
35656 #include <asm/desc.h>
35657-
35658-#if defined(CONFIG_X86_64)
35659-unsigned int __read_mostly vdso64_enabled = 1;
35660-#endif
35661+#include <asm/mman.h>
35662
35663 void __init init_vdso_image(const struct vdso_image *image)
35664 {
35665@@ -101,6 +98,11 @@ static int map_vdso(const struct vdso_image *image, bool calculate_addr)
35666 .pages = no_pages,
35667 };
35668
35669+#ifdef CONFIG_PAX_RANDMMAP
35670+ if (mm->pax_flags & MF_PAX_RANDMMAP)
35671+ calculate_addr = false;
35672+#endif
35673+
35674 if (calculate_addr) {
35675 addr = vdso_addr(current->mm->start_stack,
35676 image->size - image->sym_vvar_start);
35677@@ -111,14 +113,14 @@ static int map_vdso(const struct vdso_image *image, bool calculate_addr)
35678 down_write(&mm->mmap_sem);
35679
35680 addr = get_unmapped_area(NULL, addr,
35681- image->size - image->sym_vvar_start, 0, 0);
35682+ image->size - image->sym_vvar_start, 0, MAP_EXECUTABLE);
35683 if (IS_ERR_VALUE(addr)) {
35684 ret = addr;
35685 goto up_fail;
35686 }
35687
35688 text_start = addr - image->sym_vvar_start;
35689- current->mm->context.vdso = (void __user *)text_start;
35690+ mm->context.vdso = text_start;
35691
35692 /*
35693 * MAYWRITE to allow gdb to COW and set breakpoints
35694@@ -163,15 +165,12 @@ static int map_vdso(const struct vdso_image *image, bool calculate_addr)
35695 hpet_address >> PAGE_SHIFT,
35696 PAGE_SIZE,
35697 pgprot_noncached(PAGE_READONLY));
35698-
35699- if (ret)
35700- goto up_fail;
35701 }
35702 #endif
35703
35704 up_fail:
35705 if (ret)
35706- current->mm->context.vdso = NULL;
35707+ current->mm->context.vdso = 0;
35708
35709 up_write(&mm->mmap_sem);
35710 return ret;
35711@@ -191,8 +190,8 @@ static int load_vdso32(void)
35712
35713 if (selected_vdso32->sym_VDSO32_SYSENTER_RETURN)
35714 current_thread_info()->sysenter_return =
35715- current->mm->context.vdso +
35716- selected_vdso32->sym_VDSO32_SYSENTER_RETURN;
35717+ (void __force_user *)(current->mm->context.vdso +
35718+ selected_vdso32->sym_VDSO32_SYSENTER_RETURN);
35719
35720 return 0;
35721 }
35722@@ -201,9 +200,6 @@ static int load_vdso32(void)
35723 #ifdef CONFIG_X86_64
35724 int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
35725 {
35726- if (!vdso64_enabled)
35727- return 0;
35728-
35729 return map_vdso(&vdso_image_64, true);
35730 }
35731
35732@@ -212,12 +208,8 @@ int compat_arch_setup_additional_pages(struct linux_binprm *bprm,
35733 int uses_interp)
35734 {
35735 #ifdef CONFIG_X86_X32_ABI
35736- if (test_thread_flag(TIF_X32)) {
35737- if (!vdso64_enabled)
35738- return 0;
35739-
35740+ if (test_thread_flag(TIF_X32))
35741 return map_vdso(&vdso_image_x32, true);
35742- }
35743 #endif
35744
35745 return load_vdso32();
35746@@ -231,15 +223,6 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
35747 #endif
35748
35749 #ifdef CONFIG_X86_64
35750-static __init int vdso_setup(char *s)
35751-{
35752- vdso64_enabled = simple_strtoul(s, NULL, 0);
35753- return 0;
35754-}
35755-__setup("vdso=", vdso_setup);
35756-#endif
35757-
35758-#ifdef CONFIG_X86_64
35759 static void vgetcpu_cpu_init(void *arg)
35760 {
35761 int cpu = smp_processor_id();
35762diff --git a/arch/x86/xen/Kconfig b/arch/x86/xen/Kconfig
35763index e88fda8..76ce7ce 100644
35764--- a/arch/x86/xen/Kconfig
35765+++ b/arch/x86/xen/Kconfig
35766@@ -9,6 +9,7 @@ config XEN
35767 select XEN_HAVE_PVMMU
35768 depends on X86_64 || (X86_32 && X86_PAE)
35769 depends on X86_TSC
35770+ depends on !GRKERNSEC_CONFIG_AUTO || GRKERNSEC_CONFIG_VIRT_XEN
35771 help
35772 This is the Linux Xen port. Enabling this will allow the
35773 kernel to boot in a paravirtualized environment under the
35774diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
35775index 5240f56..0c12163 100644
35776--- a/arch/x86/xen/enlighten.c
35777+++ b/arch/x86/xen/enlighten.c
35778@@ -125,8 +125,6 @@ EXPORT_SYMBOL_GPL(xen_start_info);
35779
35780 struct shared_info xen_dummy_shared_info;
35781
35782-void *xen_initial_gdt;
35783-
35784 RESERVE_BRK(shared_info_page_brk, PAGE_SIZE);
35785 __read_mostly int xen_have_vector_callback;
35786 EXPORT_SYMBOL_GPL(xen_have_vector_callback);
35787@@ -544,8 +542,7 @@ static void xen_load_gdt(const struct desc_ptr *dtr)
35788 {
35789 unsigned long va = dtr->address;
35790 unsigned int size = dtr->size + 1;
35791- unsigned pages = (size + PAGE_SIZE - 1) / PAGE_SIZE;
35792- unsigned long frames[pages];
35793+ unsigned long frames[65536 / PAGE_SIZE];
35794 int f;
35795
35796 /*
35797@@ -593,8 +590,7 @@ static void __init xen_load_gdt_boot(const struct desc_ptr *dtr)
35798 {
35799 unsigned long va = dtr->address;
35800 unsigned int size = dtr->size + 1;
35801- unsigned pages = (size + PAGE_SIZE - 1) / PAGE_SIZE;
35802- unsigned long frames[pages];
35803+ unsigned long frames[(GDT_SIZE + PAGE_SIZE - 1) / PAGE_SIZE];
35804 int f;
35805
35806 /*
35807@@ -602,7 +598,7 @@ static void __init xen_load_gdt_boot(const struct desc_ptr *dtr)
35808 * 8-byte entries, or 16 4k pages..
35809 */
35810
35811- BUG_ON(size > 65536);
35812+ BUG_ON(size > GDT_SIZE);
35813 BUG_ON(va & ~PAGE_MASK);
35814
35815 for (f = 0; va < dtr->address + size; va += PAGE_SIZE, f++) {
35816@@ -991,7 +987,7 @@ static u32 xen_safe_apic_wait_icr_idle(void)
35817 return 0;
35818 }
35819
35820-static void set_xen_basic_apic_ops(void)
35821+static void __init set_xen_basic_apic_ops(void)
35822 {
35823 apic->read = xen_apic_read;
35824 apic->write = xen_apic_write;
35825@@ -1308,30 +1304,30 @@ static const struct pv_apic_ops xen_apic_ops __initconst = {
35826 #endif
35827 };
35828
35829-static void xen_reboot(int reason)
35830+static __noreturn void xen_reboot(int reason)
35831 {
35832 struct sched_shutdown r = { .reason = reason };
35833
35834- if (HYPERVISOR_sched_op(SCHEDOP_shutdown, &r))
35835- BUG();
35836+ HYPERVISOR_sched_op(SCHEDOP_shutdown, &r);
35837+ BUG();
35838 }
35839
35840-static void xen_restart(char *msg)
35841+static __noreturn void xen_restart(char *msg)
35842 {
35843 xen_reboot(SHUTDOWN_reboot);
35844 }
35845
35846-static void xen_emergency_restart(void)
35847+static __noreturn void xen_emergency_restart(void)
35848 {
35849 xen_reboot(SHUTDOWN_reboot);
35850 }
35851
35852-static void xen_machine_halt(void)
35853+static __noreturn void xen_machine_halt(void)
35854 {
35855 xen_reboot(SHUTDOWN_poweroff);
35856 }
35857
35858-static void xen_machine_power_off(void)
35859+static __noreturn void xen_machine_power_off(void)
35860 {
35861 if (pm_power_off)
35862 pm_power_off();
35863@@ -1484,8 +1480,11 @@ static void __ref xen_setup_gdt(int cpu)
35864 pv_cpu_ops.write_gdt_entry = xen_write_gdt_entry_boot;
35865 pv_cpu_ops.load_gdt = xen_load_gdt_boot;
35866
35867- setup_stack_canary_segment(0);
35868- switch_to_new_gdt(0);
35869+ setup_stack_canary_segment(cpu);
35870+#ifdef CONFIG_X86_64
35871+ load_percpu_segment(cpu);
35872+#endif
35873+ switch_to_new_gdt(cpu);
35874
35875 pv_cpu_ops.write_gdt_entry = xen_write_gdt_entry;
35876 pv_cpu_ops.load_gdt = xen_load_gdt;
35877@@ -1600,7 +1599,17 @@ asmlinkage __visible void __init xen_start_kernel(void)
35878 __userpte_alloc_gfp &= ~__GFP_HIGHMEM;
35879
35880 /* Work out if we support NX */
35881- x86_configure_nx();
35882+#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
35883+ if ((cpuid_eax(0x80000000) & 0xffff0000) == 0x80000000 &&
35884+ (cpuid_edx(0x80000001) & (1U << (X86_FEATURE_NX & 31)))) {
35885+ unsigned l, h;
35886+
35887+ __supported_pte_mask |= _PAGE_NX;
35888+ rdmsr(MSR_EFER, l, h);
35889+ l |= EFER_NX;
35890+ wrmsr(MSR_EFER, l, h);
35891+ }
35892+#endif
35893
35894 /* Get mfn list */
35895 xen_build_dynamic_phys_to_machine();
35896@@ -1628,13 +1637,6 @@ asmlinkage __visible void __init xen_start_kernel(void)
35897
35898 machine_ops = xen_machine_ops;
35899
35900- /*
35901- * The only reliable way to retain the initial address of the
35902- * percpu gdt_page is to remember it here, so we can go and
35903- * mark it RW later, when the initial percpu area is freed.
35904- */
35905- xen_initial_gdt = &per_cpu(gdt_page, 0);
35906-
35907 xen_smp_init();
35908
35909 #ifdef CONFIG_ACPI_NUMA
35910diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
35911index adca9e2..cdba9d1 100644
35912--- a/arch/x86/xen/mmu.c
35913+++ b/arch/x86/xen/mmu.c
35914@@ -379,7 +379,7 @@ static pteval_t pte_mfn_to_pfn(pteval_t val)
35915 return val;
35916 }
35917
35918-static pteval_t pte_pfn_to_mfn(pteval_t val)
35919+static pteval_t __intentional_overflow(-1) pte_pfn_to_mfn(pteval_t val)
35920 {
35921 if (val & _PAGE_PRESENT) {
35922 unsigned long pfn = (val & PTE_PFN_MASK) >> PAGE_SHIFT;
35923@@ -1835,7 +1835,11 @@ void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn)
35924 * L3_k[511] -> level2_fixmap_pgt */
35925 convert_pfn_mfn(level3_kernel_pgt);
35926
35927+ convert_pfn_mfn(level3_vmalloc_start_pgt);
35928+ convert_pfn_mfn(level3_vmalloc_end_pgt);
35929+ convert_pfn_mfn(level3_vmemmap_pgt);
35930 /* L3_k[511][506] -> level1_fixmap_pgt */
35931+ /* L3_k[511][507] -> level1_vsyscall_pgt */
35932 convert_pfn_mfn(level2_fixmap_pgt);
35933 }
35934 /* We get [511][511] and have Xen's version of level2_kernel_pgt */
35935@@ -1860,11 +1864,18 @@ void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn)
35936 set_page_prot(init_level4_pgt, PAGE_KERNEL_RO);
35937 set_page_prot(level3_ident_pgt, PAGE_KERNEL_RO);
35938 set_page_prot(level3_kernel_pgt, PAGE_KERNEL_RO);
35939+ set_page_prot(level3_vmalloc_start_pgt, PAGE_KERNEL_RO);
35940+ set_page_prot(level3_vmalloc_end_pgt, PAGE_KERNEL_RO);
35941+ set_page_prot(level3_vmemmap_pgt, PAGE_KERNEL_RO);
35942 set_page_prot(level3_user_vsyscall, PAGE_KERNEL_RO);
35943 set_page_prot(level2_ident_pgt, PAGE_KERNEL_RO);
35944+ set_page_prot(level2_vmemmap_pgt, PAGE_KERNEL_RO);
35945 set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO);
35946 set_page_prot(level2_fixmap_pgt, PAGE_KERNEL_RO);
35947- set_page_prot(level1_fixmap_pgt, PAGE_KERNEL_RO);
35948+ set_page_prot(level1_fixmap_pgt[0], PAGE_KERNEL_RO);
35949+ set_page_prot(level1_fixmap_pgt[1], PAGE_KERNEL_RO);
35950+ set_page_prot(level1_fixmap_pgt[2], PAGE_KERNEL_RO);
35951+ set_page_prot(level1_vsyscall_pgt, PAGE_KERNEL_RO);
35952
35953 /* Pin down new L4 */
35954 pin_pagetable_pfn(MMUEXT_PIN_L4_TABLE,
35955@@ -2048,6 +2059,7 @@ static void __init xen_post_allocator_init(void)
35956 pv_mmu_ops.set_pud = xen_set_pud;
35957 #if PAGETABLE_LEVELS == 4
35958 pv_mmu_ops.set_pgd = xen_set_pgd;
35959+ pv_mmu_ops.set_pgd_batched = xen_set_pgd;
35960 #endif
35961
35962 /* This will work as long as patching hasn't happened yet
35963@@ -2126,6 +2138,7 @@ static const struct pv_mmu_ops xen_mmu_ops __initconst = {
35964 .pud_val = PV_CALLEE_SAVE(xen_pud_val),
35965 .make_pud = PV_CALLEE_SAVE(xen_make_pud),
35966 .set_pgd = xen_set_pgd_hyper,
35967+ .set_pgd_batched = xen_set_pgd_hyper,
35968
35969 .alloc_pud = xen_alloc_pmd_init,
35970 .release_pud = xen_release_pmd_init,
35971diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
35972index 08e8489..b1e182f 100644
35973--- a/arch/x86/xen/smp.c
35974+++ b/arch/x86/xen/smp.c
35975@@ -288,17 +288,13 @@ static void __init xen_smp_prepare_boot_cpu(void)
35976
35977 if (xen_pv_domain()) {
35978 if (!xen_feature(XENFEAT_writable_page_tables))
35979- /* We've switched to the "real" per-cpu gdt, so make
35980- * sure the old memory can be recycled. */
35981- make_lowmem_page_readwrite(xen_initial_gdt);
35982-
35983 #ifdef CONFIG_X86_32
35984 /*
35985 * Xen starts us with XEN_FLAT_RING1_DS, but linux code
35986 * expects __USER_DS
35987 */
35988- loadsegment(ds, __USER_DS);
35989- loadsegment(es, __USER_DS);
35990+ loadsegment(ds, __KERNEL_DS);
35991+ loadsegment(es, __KERNEL_DS);
35992 #endif
35993
35994 xen_filter_cpu_maps();
35995@@ -379,7 +375,7 @@ cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
35996 #ifdef CONFIG_X86_32
35997 /* Note: PVH is not yet supported on x86_32. */
35998 ctxt->user_regs.fs = __KERNEL_PERCPU;
35999- ctxt->user_regs.gs = __KERNEL_STACK_CANARY;
36000+ savesegment(gs, ctxt->user_regs.gs);
36001 #endif
36002 memset(&ctxt->fpu_ctxt, 0, sizeof(ctxt->fpu_ctxt));
36003
36004@@ -387,8 +383,8 @@ cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
36005 ctxt->user_regs.eip = (unsigned long)cpu_bringup_and_idle;
36006 ctxt->flags = VGCF_IN_KERNEL;
36007 ctxt->user_regs.eflags = 0x1000; /* IOPL_RING1 */
36008- ctxt->user_regs.ds = __USER_DS;
36009- ctxt->user_regs.es = __USER_DS;
36010+ ctxt->user_regs.ds = __KERNEL_DS;
36011+ ctxt->user_regs.es = __KERNEL_DS;
36012 ctxt->user_regs.ss = __KERNEL_DS;
36013
36014 xen_copy_trap_info(ctxt->trap_ctxt);
36015@@ -446,14 +442,13 @@ static int xen_cpu_up(unsigned int cpu, struct task_struct *idle)
36016 int rc;
36017
36018 per_cpu(current_task, cpu) = idle;
36019+ per_cpu(current_tinfo, cpu) = &idle->tinfo;
36020 #ifdef CONFIG_X86_32
36021 irq_ctx_init(cpu);
36022 #else
36023 clear_tsk_thread_flag(idle, TIF_FORK);
36024 #endif
36025- per_cpu(kernel_stack, cpu) =
36026- (unsigned long)task_stack_page(idle) -
36027- KERNEL_STACK_OFFSET + THREAD_SIZE;
36028+ per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(idle) - 16 + THREAD_SIZE;
36029
36030 xen_setup_runstate_info(cpu);
36031 xen_setup_timer(cpu);
36032@@ -732,7 +727,7 @@ static const struct smp_ops xen_smp_ops __initconst = {
36033
36034 void __init xen_smp_init(void)
36035 {
36036- smp_ops = xen_smp_ops;
36037+ memcpy((void *)&smp_ops, &xen_smp_ops, sizeof smp_ops);
36038 xen_fill_possible_map();
36039 }
36040
36041diff --git a/arch/x86/xen/xen-asm_32.S b/arch/x86/xen/xen-asm_32.S
36042index fd92a64..1f72641 100644
36043--- a/arch/x86/xen/xen-asm_32.S
36044+++ b/arch/x86/xen/xen-asm_32.S
36045@@ -99,7 +99,7 @@ ENTRY(xen_iret)
36046 pushw %fs
36047 movl $(__KERNEL_PERCPU), %eax
36048 movl %eax, %fs
36049- movl %fs:xen_vcpu, %eax
36050+ mov PER_CPU_VAR(xen_vcpu), %eax
36051 POP_FS
36052 #else
36053 movl %ss:xen_vcpu, %eax
36054diff --git a/arch/x86/xen/xen-head.S b/arch/x86/xen/xen-head.S
36055index 674b2225..f1f5dc1 100644
36056--- a/arch/x86/xen/xen-head.S
36057+++ b/arch/x86/xen/xen-head.S
36058@@ -39,6 +39,17 @@ ENTRY(startup_xen)
36059 #ifdef CONFIG_X86_32
36060 mov %esi,xen_start_info
36061 mov $init_thread_union+THREAD_SIZE,%esp
36062+#ifdef CONFIG_SMP
36063+ movl $cpu_gdt_table,%edi
36064+ movl $__per_cpu_load,%eax
36065+ movw %ax,__KERNEL_PERCPU + 2(%edi)
36066+ rorl $16,%eax
36067+ movb %al,__KERNEL_PERCPU + 4(%edi)
36068+ movb %ah,__KERNEL_PERCPU + 7(%edi)
36069+ movl $__per_cpu_end - 1,%eax
36070+ subl $__per_cpu_start,%eax
36071+ movw %ax,__KERNEL_PERCPU + 0(%edi)
36072+#endif
36073 #else
36074 mov %rsi,xen_start_info
36075 mov $init_thread_union+THREAD_SIZE,%rsp
36076diff --git a/arch/x86/xen/xen-ops.h b/arch/x86/xen/xen-ops.h
36077index 9e195c6..523ed36 100644
36078--- a/arch/x86/xen/xen-ops.h
36079+++ b/arch/x86/xen/xen-ops.h
36080@@ -16,8 +16,6 @@ void xen_syscall_target(void);
36081 void xen_syscall32_target(void);
36082 #endif
36083
36084-extern void *xen_initial_gdt;
36085-
36086 struct trap_info;
36087 void xen_copy_trap_info(struct trap_info *traps);
36088
36089diff --git a/arch/xtensa/variants/dc232b/include/variant/core.h b/arch/xtensa/variants/dc232b/include/variant/core.h
36090index 525bd3d..ef888b1 100644
36091--- a/arch/xtensa/variants/dc232b/include/variant/core.h
36092+++ b/arch/xtensa/variants/dc232b/include/variant/core.h
36093@@ -119,9 +119,9 @@
36094 ----------------------------------------------------------------------*/
36095
36096 #define XCHAL_ICACHE_LINESIZE 32 /* I-cache line size in bytes */
36097-#define XCHAL_DCACHE_LINESIZE 32 /* D-cache line size in bytes */
36098 #define XCHAL_ICACHE_LINEWIDTH 5 /* log2(I line size in bytes) */
36099 #define XCHAL_DCACHE_LINEWIDTH 5 /* log2(D line size in bytes) */
36100+#define XCHAL_DCACHE_LINESIZE (_AC(1,UL) << XCHAL_DCACHE_LINEWIDTH) /* D-cache line size in bytes */
36101
36102 #define XCHAL_ICACHE_SIZE 16384 /* I-cache size in bytes or 0 */
36103 #define XCHAL_DCACHE_SIZE 16384 /* D-cache size in bytes or 0 */
36104diff --git a/arch/xtensa/variants/fsf/include/variant/core.h b/arch/xtensa/variants/fsf/include/variant/core.h
36105index 2f33760..835e50a 100644
36106--- a/arch/xtensa/variants/fsf/include/variant/core.h
36107+++ b/arch/xtensa/variants/fsf/include/variant/core.h
36108@@ -11,6 +11,7 @@
36109 #ifndef _XTENSA_CORE_H
36110 #define _XTENSA_CORE_H
36111
36112+#include <linux/const.h>
36113
36114 /****************************************************************************
36115 Parameters Useful for Any Code, USER or PRIVILEGED
36116@@ -112,9 +113,9 @@
36117 ----------------------------------------------------------------------*/
36118
36119 #define XCHAL_ICACHE_LINESIZE 16 /* I-cache line size in bytes */
36120-#define XCHAL_DCACHE_LINESIZE 16 /* D-cache line size in bytes */
36121 #define XCHAL_ICACHE_LINEWIDTH 4 /* log2(I line size in bytes) */
36122 #define XCHAL_DCACHE_LINEWIDTH 4 /* log2(D line size in bytes) */
36123+#define XCHAL_DCACHE_LINESIZE (_AC(1,UL) << XCHAL_DCACHE_LINEWIDTH) /* D-cache line size in bytes */
36124
36125 #define XCHAL_ICACHE_SIZE 8192 /* I-cache size in bytes or 0 */
36126 #define XCHAL_DCACHE_SIZE 8192 /* D-cache size in bytes or 0 */
36127diff --git a/block/bio.c b/block/bio.c
36128index f66a4ea..73ddf55 100644
36129--- a/block/bio.c
36130+++ b/block/bio.c
36131@@ -1172,7 +1172,7 @@ struct bio *bio_copy_user_iov(struct request_queue *q,
36132 /*
36133 * Overflow, abort
36134 */
36135- if (end < start)
36136+ if (end < start || end - start > INT_MAX - nr_pages)
36137 return ERR_PTR(-EINVAL);
36138
36139 nr_pages += end - start;
36140@@ -1297,7 +1297,7 @@ struct bio *bio_map_user_iov(struct request_queue *q,
36141 /*
36142 * Overflow, abort
36143 */
36144- if (end < start)
36145+ if (end < start || end - start > INT_MAX - nr_pages)
36146 return ERR_PTR(-EINVAL);
36147
36148 nr_pages += end - start;
36149diff --git a/block/blk-iopoll.c b/block/blk-iopoll.c
36150index 0736729..2ec3b48 100644
36151--- a/block/blk-iopoll.c
36152+++ b/block/blk-iopoll.c
36153@@ -74,7 +74,7 @@ void blk_iopoll_complete(struct blk_iopoll *iop)
36154 }
36155 EXPORT_SYMBOL(blk_iopoll_complete);
36156
36157-static void blk_iopoll_softirq(struct softirq_action *h)
36158+static __latent_entropy void blk_iopoll_softirq(void)
36159 {
36160 struct list_head *list = this_cpu_ptr(&blk_cpu_iopoll);
36161 int rearm = 0, budget = blk_iopoll_budget;
36162diff --git a/block/blk-map.c b/block/blk-map.c
36163index b8d2725..08c52b0 100644
36164--- a/block/blk-map.c
36165+++ b/block/blk-map.c
36166@@ -192,7 +192,7 @@ int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
36167 if (!len || !kbuf)
36168 return -EINVAL;
36169
36170- do_copy = !blk_rq_aligned(q, addr, len) || object_is_on_stack(kbuf);
36171+ do_copy = !blk_rq_aligned(q, addr, len) || object_starts_on_stack(kbuf);
36172 if (do_copy)
36173 bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
36174 else
36175diff --git a/block/blk-softirq.c b/block/blk-softirq.c
36176index 53b1737..08177d2e 100644
36177--- a/block/blk-softirq.c
36178+++ b/block/blk-softirq.c
36179@@ -18,7 +18,7 @@ static DEFINE_PER_CPU(struct list_head, blk_cpu_done);
36180 * Softirq action handler - move entries to local list and loop over them
36181 * while passing them to the queue registered handler.
36182 */
36183-static void blk_done_softirq(struct softirq_action *h)
36184+static __latent_entropy void blk_done_softirq(void)
36185 {
36186 struct list_head *cpu_list, local_list;
36187
36188diff --git a/block/bsg.c b/block/bsg.c
36189index d214e92..9649863 100644
36190--- a/block/bsg.c
36191+++ b/block/bsg.c
36192@@ -140,16 +140,24 @@ static int blk_fill_sgv4_hdr_rq(struct request_queue *q, struct request *rq,
36193 struct sg_io_v4 *hdr, struct bsg_device *bd,
36194 fmode_t has_write_perm)
36195 {
36196+ unsigned char tmpcmd[sizeof(rq->__cmd)];
36197+ unsigned char *cmdptr;
36198+
36199 if (hdr->request_len > BLK_MAX_CDB) {
36200 rq->cmd = kzalloc(hdr->request_len, GFP_KERNEL);
36201 if (!rq->cmd)
36202 return -ENOMEM;
36203- }
36204+ cmdptr = rq->cmd;
36205+ } else
36206+ cmdptr = tmpcmd;
36207
36208- if (copy_from_user(rq->cmd, (void __user *)(unsigned long)hdr->request,
36209+ if (copy_from_user(cmdptr, (void __user *)(unsigned long)hdr->request,
36210 hdr->request_len))
36211 return -EFAULT;
36212
36213+ if (cmdptr != rq->cmd)
36214+ memcpy(rq->cmd, cmdptr, hdr->request_len);
36215+
36216 if (hdr->subprotocol == BSG_SUB_PROTOCOL_SCSI_CMD) {
36217 if (blk_verify_command(rq->cmd, has_write_perm))
36218 return -EPERM;
36219diff --git a/block/compat_ioctl.c b/block/compat_ioctl.c
36220index f678c73..f35aa18 100644
36221--- a/block/compat_ioctl.c
36222+++ b/block/compat_ioctl.c
36223@@ -156,7 +156,7 @@ static int compat_cdrom_generic_command(struct block_device *bdev, fmode_t mode,
36224 cgc = compat_alloc_user_space(sizeof(*cgc));
36225 cgc32 = compat_ptr(arg);
36226
36227- if (copy_in_user(&cgc->cmd, &cgc32->cmd, sizeof(cgc->cmd)) ||
36228+ if (copy_in_user(cgc->cmd, cgc32->cmd, sizeof(cgc->cmd)) ||
36229 get_user(data, &cgc32->buffer) ||
36230 put_user(compat_ptr(data), &cgc->buffer) ||
36231 copy_in_user(&cgc->buflen, &cgc32->buflen,
36232@@ -341,7 +341,7 @@ static int compat_fd_ioctl(struct block_device *bdev, fmode_t mode,
36233 err |= __get_user(f->spec1, &uf->spec1);
36234 err |= __get_user(f->fmt_gap, &uf->fmt_gap);
36235 err |= __get_user(name, &uf->name);
36236- f->name = compat_ptr(name);
36237+ f->name = (void __force_kernel *)compat_ptr(name);
36238 if (err) {
36239 err = -EFAULT;
36240 goto out;
36241diff --git a/block/genhd.c b/block/genhd.c
36242index 0a536dc..b8f7aca 100644
36243--- a/block/genhd.c
36244+++ b/block/genhd.c
36245@@ -469,21 +469,24 @@ static char *bdevt_str(dev_t devt, char *buf)
36246
36247 /*
36248 * Register device numbers dev..(dev+range-1)
36249- * range must be nonzero
36250+ * Noop if @range is zero.
36251 * The hash chain is sorted on range, so that subranges can override.
36252 */
36253 void blk_register_region(dev_t devt, unsigned long range, struct module *module,
36254 struct kobject *(*probe)(dev_t, int *, void *),
36255 int (*lock)(dev_t, void *), void *data)
36256 {
36257- kobj_map(bdev_map, devt, range, module, probe, lock, data);
36258+ if (range)
36259+ kobj_map(bdev_map, devt, range, module, probe, lock, data);
36260 }
36261
36262 EXPORT_SYMBOL(blk_register_region);
36263
36264+/* undo blk_register_region(), noop if @range is zero */
36265 void blk_unregister_region(dev_t devt, unsigned long range)
36266 {
36267- kobj_unmap(bdev_map, devt, range);
36268+ if (range)
36269+ kobj_unmap(bdev_map, devt, range);
36270 }
36271
36272 EXPORT_SYMBOL(blk_unregister_region);
36273diff --git a/block/partitions/efi.c b/block/partitions/efi.c
36274index 26cb624..a49c3a5 100644
36275--- a/block/partitions/efi.c
36276+++ b/block/partitions/efi.c
36277@@ -293,14 +293,14 @@ static gpt_entry *alloc_read_gpt_entries(struct parsed_partitions *state,
36278 if (!gpt)
36279 return NULL;
36280
36281+ if (!le32_to_cpu(gpt->num_partition_entries))
36282+ return NULL;
36283+ pte = kcalloc(le32_to_cpu(gpt->num_partition_entries), le32_to_cpu(gpt->sizeof_partition_entry), GFP_KERNEL);
36284+ if (!pte)
36285+ return NULL;
36286+
36287 count = le32_to_cpu(gpt->num_partition_entries) *
36288 le32_to_cpu(gpt->sizeof_partition_entry);
36289- if (!count)
36290- return NULL;
36291- pte = kmalloc(count, GFP_KERNEL);
36292- if (!pte)
36293- return NULL;
36294-
36295 if (read_lba(state, le64_to_cpu(gpt->partition_entry_lba),
36296 (u8 *) pte, count) < count) {
36297 kfree(pte);
36298diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c
36299index e1f71c3..02d295a 100644
36300--- a/block/scsi_ioctl.c
36301+++ b/block/scsi_ioctl.c
36302@@ -67,7 +67,7 @@ static int scsi_get_bus(struct request_queue *q, int __user *p)
36303 return put_user(0, p);
36304 }
36305
36306-static int sg_get_timeout(struct request_queue *q)
36307+static int __intentional_overflow(-1) sg_get_timeout(struct request_queue *q)
36308 {
36309 return jiffies_to_clock_t(q->sg_timeout);
36310 }
36311@@ -227,8 +227,20 @@ EXPORT_SYMBOL(blk_verify_command);
36312 static int blk_fill_sghdr_rq(struct request_queue *q, struct request *rq,
36313 struct sg_io_hdr *hdr, fmode_t mode)
36314 {
36315- if (copy_from_user(rq->cmd, hdr->cmdp, hdr->cmd_len))
36316+ unsigned char tmpcmd[sizeof(rq->__cmd)];
36317+ unsigned char *cmdptr;
36318+
36319+ if (rq->cmd != rq->__cmd)
36320+ cmdptr = rq->cmd;
36321+ else
36322+ cmdptr = tmpcmd;
36323+
36324+ if (copy_from_user(cmdptr, hdr->cmdp, hdr->cmd_len))
36325 return -EFAULT;
36326+
36327+ if (cmdptr != rq->cmd)
36328+ memcpy(rq->cmd, cmdptr, hdr->cmd_len);
36329+
36330 if (blk_verify_command(rq->cmd, mode & FMODE_WRITE))
36331 return -EPERM;
36332
36333@@ -422,6 +434,8 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
36334 int err;
36335 unsigned int in_len, out_len, bytes, opcode, cmdlen;
36336 char *buffer = NULL, sense[SCSI_SENSE_BUFFERSIZE];
36337+ unsigned char tmpcmd[sizeof(rq->__cmd)];
36338+ unsigned char *cmdptr;
36339
36340 if (!sic)
36341 return -EINVAL;
36342@@ -460,9 +474,18 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
36343 */
36344 err = -EFAULT;
36345 rq->cmd_len = cmdlen;
36346- if (copy_from_user(rq->cmd, sic->data, cmdlen))
36347+
36348+ if (rq->cmd != rq->__cmd)
36349+ cmdptr = rq->cmd;
36350+ else
36351+ cmdptr = tmpcmd;
36352+
36353+ if (copy_from_user(cmdptr, sic->data, cmdlen))
36354 goto error;
36355
36356+ if (rq->cmd != cmdptr)
36357+ memcpy(rq->cmd, cmdptr, cmdlen);
36358+
36359 if (in_len && copy_from_user(buffer, sic->data + cmdlen, in_len))
36360 goto error;
36361
36362diff --git a/crypto/cryptd.c b/crypto/cryptd.c
36363index 650afac1..f3307de 100644
36364--- a/crypto/cryptd.c
36365+++ b/crypto/cryptd.c
36366@@ -63,7 +63,7 @@ struct cryptd_blkcipher_ctx {
36367
36368 struct cryptd_blkcipher_request_ctx {
36369 crypto_completion_t complete;
36370-};
36371+} __no_const;
36372
36373 struct cryptd_hash_ctx {
36374 struct crypto_shash *child;
36375@@ -80,7 +80,7 @@ struct cryptd_aead_ctx {
36376
36377 struct cryptd_aead_request_ctx {
36378 crypto_completion_t complete;
36379-};
36380+} __no_const;
36381
36382 static void cryptd_queue_worker(struct work_struct *work);
36383
36384diff --git a/crypto/pcrypt.c b/crypto/pcrypt.c
36385index c305d41..a96de79 100644
36386--- a/crypto/pcrypt.c
36387+++ b/crypto/pcrypt.c
36388@@ -440,7 +440,7 @@ static int pcrypt_sysfs_add(struct padata_instance *pinst, const char *name)
36389 int ret;
36390
36391 pinst->kobj.kset = pcrypt_kset;
36392- ret = kobject_add(&pinst->kobj, NULL, name);
36393+ ret = kobject_add(&pinst->kobj, NULL, "%s", name);
36394 if (!ret)
36395 kobject_uevent(&pinst->kobj, KOBJ_ADD);
36396
36397diff --git a/crypto/zlib.c b/crypto/zlib.c
36398index 0eefa9d..0fa3d29 100644
36399--- a/crypto/zlib.c
36400+++ b/crypto/zlib.c
36401@@ -95,10 +95,10 @@ static int zlib_compress_setup(struct crypto_pcomp *tfm, void *params,
36402 zlib_comp_exit(ctx);
36403
36404 window_bits = tb[ZLIB_COMP_WINDOWBITS]
36405- ? nla_get_u32(tb[ZLIB_COMP_WINDOWBITS])
36406+ ? nla_get_s32(tb[ZLIB_COMP_WINDOWBITS])
36407 : MAX_WBITS;
36408 mem_level = tb[ZLIB_COMP_MEMLEVEL]
36409- ? nla_get_u32(tb[ZLIB_COMP_MEMLEVEL])
36410+ ? nla_get_s32(tb[ZLIB_COMP_MEMLEVEL])
36411 : DEF_MEM_LEVEL;
36412
36413 workspacesize = zlib_deflate_workspacesize(window_bits, mem_level);
36414diff --git a/drivers/acpi/acpica/hwxfsleep.c b/drivers/acpi/acpica/hwxfsleep.c
36415index 3b37676..898edfa 100644
36416--- a/drivers/acpi/acpica/hwxfsleep.c
36417+++ b/drivers/acpi/acpica/hwxfsleep.c
36418@@ -63,11 +63,12 @@ static acpi_status acpi_hw_sleep_dispatch(u8 sleep_state, u32 function_id);
36419 /* Legacy functions are optional, based upon ACPI_REDUCED_HARDWARE */
36420
36421 static struct acpi_sleep_functions acpi_sleep_dispatch[] = {
36422- {ACPI_HW_OPTIONAL_FUNCTION(acpi_hw_legacy_sleep),
36423- acpi_hw_extended_sleep},
36424- {ACPI_HW_OPTIONAL_FUNCTION(acpi_hw_legacy_wake_prep),
36425- acpi_hw_extended_wake_prep},
36426- {ACPI_HW_OPTIONAL_FUNCTION(acpi_hw_legacy_wake), acpi_hw_extended_wake}
36427+ {.legacy_function = ACPI_HW_OPTIONAL_FUNCTION(acpi_hw_legacy_sleep),
36428+ .extended_function = acpi_hw_extended_sleep},
36429+ {.legacy_function = ACPI_HW_OPTIONAL_FUNCTION(acpi_hw_legacy_wake_prep),
36430+ .extended_function = acpi_hw_extended_wake_prep},
36431+ {.legacy_function = ACPI_HW_OPTIONAL_FUNCTION(acpi_hw_legacy_wake),
36432+ .extended_function = acpi_hw_extended_wake}
36433 };
36434
36435 /*
36436diff --git a/drivers/acpi/apei/apei-internal.h b/drivers/acpi/apei/apei-internal.h
36437index 16129c7..8b675cd 100644
36438--- a/drivers/acpi/apei/apei-internal.h
36439+++ b/drivers/acpi/apei/apei-internal.h
36440@@ -19,7 +19,7 @@ typedef int (*apei_exec_ins_func_t)(struct apei_exec_context *ctx,
36441 struct apei_exec_ins_type {
36442 u32 flags;
36443 apei_exec_ins_func_t run;
36444-};
36445+} __do_const;
36446
36447 struct apei_exec_context {
36448 u32 ip;
36449diff --git a/drivers/acpi/apei/ghes.c b/drivers/acpi/apei/ghes.c
36450index e82d097..0c855c1 100644
36451--- a/drivers/acpi/apei/ghes.c
36452+++ b/drivers/acpi/apei/ghes.c
36453@@ -478,7 +478,7 @@ static void __ghes_print_estatus(const char *pfx,
36454 const struct acpi_hest_generic *generic,
36455 const struct acpi_hest_generic_status *estatus)
36456 {
36457- static atomic_t seqno;
36458+ static atomic_unchecked_t seqno;
36459 unsigned int curr_seqno;
36460 char pfx_seq[64];
36461
36462@@ -489,7 +489,7 @@ static void __ghes_print_estatus(const char *pfx,
36463 else
36464 pfx = KERN_ERR;
36465 }
36466- curr_seqno = atomic_inc_return(&seqno);
36467+ curr_seqno = atomic_inc_return_unchecked(&seqno);
36468 snprintf(pfx_seq, sizeof(pfx_seq), "%s{%u}" HW_ERR, pfx, curr_seqno);
36469 printk("%s""Hardware error from APEI Generic Hardware Error Source: %d\n",
36470 pfx_seq, generic->header.source_id);
36471diff --git a/drivers/acpi/bgrt.c b/drivers/acpi/bgrt.c
36472index a83e3c6..c3d617f 100644
36473--- a/drivers/acpi/bgrt.c
36474+++ b/drivers/acpi/bgrt.c
36475@@ -86,8 +86,10 @@ static int __init bgrt_init(void)
36476 if (!bgrt_image)
36477 return -ENODEV;
36478
36479- bin_attr_image.private = bgrt_image;
36480- bin_attr_image.size = bgrt_image_size;
36481+ pax_open_kernel();
36482+ *(void **)&bin_attr_image.private = bgrt_image;
36483+ *(size_t *)&bin_attr_image.size = bgrt_image_size;
36484+ pax_close_kernel();
36485
36486 bgrt_kobj = kobject_create_and_add("bgrt", acpi_kobj);
36487 if (!bgrt_kobj)
36488diff --git a/drivers/acpi/blacklist.c b/drivers/acpi/blacklist.c
36489index 9b693d5..8953d54 100644
36490--- a/drivers/acpi/blacklist.c
36491+++ b/drivers/acpi/blacklist.c
36492@@ -51,7 +51,7 @@ struct acpi_blacklist_item {
36493 u32 is_critical_error;
36494 };
36495
36496-static struct dmi_system_id acpi_osi_dmi_table[] __initdata;
36497+static const struct dmi_system_id acpi_osi_dmi_table[] __initconst;
36498
36499 /*
36500 * POLICY: If *anything* doesn't work, put it on the blacklist.
36501@@ -163,7 +163,7 @@ static int __init dmi_disable_osi_win8(const struct dmi_system_id *d)
36502 return 0;
36503 }
36504
36505-static struct dmi_system_id acpi_osi_dmi_table[] __initdata = {
36506+static const struct dmi_system_id acpi_osi_dmi_table[] __initconst = {
36507 {
36508 .callback = dmi_disable_osi_vista,
36509 .ident = "Fujitsu Siemens",
36510diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c
36511index 8b67bd0..b59593e 100644
36512--- a/drivers/acpi/bus.c
36513+++ b/drivers/acpi/bus.c
36514@@ -67,7 +67,7 @@ static int set_copy_dsdt(const struct dmi_system_id *id)
36515 }
36516 #endif
36517
36518-static struct dmi_system_id dsdt_dmi_table[] __initdata = {
36519+static const struct dmi_system_id dsdt_dmi_table[] __initconst = {
36520 /*
36521 * Invoke DSDT corruption work-around on all Toshiba Satellite.
36522 * https://bugzilla.kernel.org/show_bug.cgi?id=14679
36523@@ -83,7 +83,7 @@ static struct dmi_system_id dsdt_dmi_table[] __initdata = {
36524 {}
36525 };
36526 #else
36527-static struct dmi_system_id dsdt_dmi_table[] __initdata = {
36528+static const struct dmi_system_id dsdt_dmi_table[] __initconst = {
36529 {}
36530 };
36531 #endif
36532diff --git a/drivers/acpi/custom_method.c b/drivers/acpi/custom_method.c
36533index c68e724..e863008 100644
36534--- a/drivers/acpi/custom_method.c
36535+++ b/drivers/acpi/custom_method.c
36536@@ -29,6 +29,10 @@ static ssize_t cm_write(struct file *file, const char __user * user_buf,
36537 struct acpi_table_header table;
36538 acpi_status status;
36539
36540+#ifdef CONFIG_GRKERNSEC_KMEM
36541+ return -EPERM;
36542+#endif
36543+
36544 if (!(*ppos)) {
36545 /* parse the table header to get the table length */
36546 if (count <= sizeof(struct acpi_table_header))
36547diff --git a/drivers/acpi/device_pm.c b/drivers/acpi/device_pm.c
36548index 735db11..91e07ff 100644
36549--- a/drivers/acpi/device_pm.c
36550+++ b/drivers/acpi/device_pm.c
36551@@ -1025,6 +1025,8 @@ EXPORT_SYMBOL_GPL(acpi_subsys_freeze);
36552
36553 #endif /* CONFIG_PM_SLEEP */
36554
36555+static void acpi_dev_pm_detach(struct device *dev, bool power_off);
36556+
36557 static struct dev_pm_domain acpi_general_pm_domain = {
36558 .ops = {
36559 .runtime_suspend = acpi_subsys_runtime_suspend,
36560@@ -1041,6 +1043,7 @@ static struct dev_pm_domain acpi_general_pm_domain = {
36561 .restore_early = acpi_subsys_resume_early,
36562 #endif
36563 },
36564+ .detach = acpi_dev_pm_detach
36565 };
36566
36567 /**
36568@@ -1110,7 +1113,6 @@ int acpi_dev_pm_attach(struct device *dev, bool power_on)
36569 acpi_device_wakeup(adev, ACPI_STATE_S0, false);
36570 }
36571
36572- dev->pm_domain->detach = acpi_dev_pm_detach;
36573 return 0;
36574 }
36575 EXPORT_SYMBOL_GPL(acpi_dev_pm_attach);
36576diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
36577index a8dd2f7..e15950e 100644
36578--- a/drivers/acpi/ec.c
36579+++ b/drivers/acpi/ec.c
36580@@ -1242,7 +1242,7 @@ static int ec_clear_on_resume(const struct dmi_system_id *id)
36581 return 0;
36582 }
36583
36584-static struct dmi_system_id ec_dmi_table[] __initdata = {
36585+static const struct dmi_system_id ec_dmi_table[] __initconst = {
36586 {
36587 ec_skip_dsdt_scan, "Compal JFL92", {
36588 DMI_MATCH(DMI_BIOS_VENDOR, "COMPAL"),
36589diff --git a/drivers/acpi/pci_slot.c b/drivers/acpi/pci_slot.c
36590index 139d9e4..9a9d799 100644
36591--- a/drivers/acpi/pci_slot.c
36592+++ b/drivers/acpi/pci_slot.c
36593@@ -195,7 +195,7 @@ static int do_sta_before_sun(const struct dmi_system_id *d)
36594 return 0;
36595 }
36596
36597-static struct dmi_system_id acpi_pci_slot_dmi_table[] __initdata = {
36598+static const struct dmi_system_id acpi_pci_slot_dmi_table[] __initconst = {
36599 /*
36600 * Fujitsu Primequest machines will return 1023 to indicate an
36601 * error if the _SUN method is evaluated on SxFy objects that
36602diff --git a/drivers/acpi/processor_driver.c b/drivers/acpi/processor_driver.c
36603index d9f7158..168e742 100644
36604--- a/drivers/acpi/processor_driver.c
36605+++ b/drivers/acpi/processor_driver.c
36606@@ -159,7 +159,7 @@ static int acpi_cpu_soft_notify(struct notifier_block *nfb,
36607 return NOTIFY_OK;
36608 }
36609
36610-static struct notifier_block __refdata acpi_cpu_notifier = {
36611+static struct notifier_block __refconst acpi_cpu_notifier = {
36612 .notifier_call = acpi_cpu_soft_notify,
36613 };
36614
36615diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
36616index f98db0b..8309c83 100644
36617--- a/drivers/acpi/processor_idle.c
36618+++ b/drivers/acpi/processor_idle.c
36619@@ -912,7 +912,7 @@ static int acpi_processor_setup_cpuidle_states(struct acpi_processor *pr)
36620 {
36621 int i, count = CPUIDLE_DRIVER_STATE_START;
36622 struct acpi_processor_cx *cx;
36623- struct cpuidle_state *state;
36624+ cpuidle_state_no_const *state;
36625 struct cpuidle_driver *drv = &acpi_idle_driver;
36626
36627 if (!pr->flags.power_setup_done)
36628diff --git a/drivers/acpi/processor_pdc.c b/drivers/acpi/processor_pdc.c
36629index e5dd808..1eceed1 100644
36630--- a/drivers/acpi/processor_pdc.c
36631+++ b/drivers/acpi/processor_pdc.c
36632@@ -176,7 +176,7 @@ static int __init set_no_mwait(const struct dmi_system_id *id)
36633 return 0;
36634 }
36635
36636-static struct dmi_system_id processor_idle_dmi_table[] __initdata = {
36637+static const struct dmi_system_id processor_idle_dmi_table[] __initconst = {
36638 {
36639 set_no_mwait, "Extensa 5220", {
36640 DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies LTD"),
36641diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
36642index 7f251dd..47b262c 100644
36643--- a/drivers/acpi/sleep.c
36644+++ b/drivers/acpi/sleep.c
36645@@ -148,7 +148,7 @@ static int __init init_nvs_nosave(const struct dmi_system_id *d)
36646 return 0;
36647 }
36648
36649-static struct dmi_system_id acpisleep_dmi_table[] __initdata = {
36650+static const struct dmi_system_id acpisleep_dmi_table[] __initconst = {
36651 {
36652 .callback = init_old_suspend_ordering,
36653 .ident = "Abit KN9 (nForce4 variant)",
36654diff --git a/drivers/acpi/sysfs.c b/drivers/acpi/sysfs.c
36655index 13e577c..cef11ee 100644
36656--- a/drivers/acpi/sysfs.c
36657+++ b/drivers/acpi/sysfs.c
36658@@ -423,11 +423,11 @@ static u32 num_counters;
36659 static struct attribute **all_attrs;
36660 static u32 acpi_gpe_count;
36661
36662-static struct attribute_group interrupt_stats_attr_group = {
36663+static attribute_group_no_const interrupt_stats_attr_group = {
36664 .name = "interrupts",
36665 };
36666
36667-static struct kobj_attribute *counter_attrs;
36668+static kobj_attribute_no_const *counter_attrs;
36669
36670 static void delete_gpe_attr_array(void)
36671 {
36672diff --git a/drivers/acpi/thermal.c b/drivers/acpi/thermal.c
36673index d24fa19..782f1e6 100644
36674--- a/drivers/acpi/thermal.c
36675+++ b/drivers/acpi/thermal.c
36676@@ -1209,7 +1209,7 @@ static int thermal_psv(const struct dmi_system_id *d) {
36677 return 0;
36678 }
36679
36680-static struct dmi_system_id thermal_dmi_table[] __initdata = {
36681+static const struct dmi_system_id thermal_dmi_table[] __initconst = {
36682 /*
36683 * Award BIOS on this AOpen makes thermal control almost worthless.
36684 * http://bugzilla.kernel.org/show_bug.cgi?id=8842
36685diff --git a/drivers/acpi/video.c b/drivers/acpi/video.c
36686index 26eb70c..4d66ddf 100644
36687--- a/drivers/acpi/video.c
36688+++ b/drivers/acpi/video.c
36689@@ -418,7 +418,7 @@ static int __init video_disable_native_backlight(const struct dmi_system_id *d)
36690 return 0;
36691 }
36692
36693-static struct dmi_system_id video_dmi_table[] __initdata = {
36694+static const struct dmi_system_id video_dmi_table[] __initconst = {
36695 /*
36696 * Broken _BQC workaround http://bugzilla.kernel.org/show_bug.cgi?id=13121
36697 */
36698diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c
36699index 61a9c07..ea98fa1 100644
36700--- a/drivers/ata/libahci.c
36701+++ b/drivers/ata/libahci.c
36702@@ -1252,7 +1252,7 @@ int ahci_kick_engine(struct ata_port *ap)
36703 }
36704 EXPORT_SYMBOL_GPL(ahci_kick_engine);
36705
36706-static int ahci_exec_polled_cmd(struct ata_port *ap, int pmp,
36707+static int __intentional_overflow(-1) ahci_exec_polled_cmd(struct ata_port *ap, int pmp,
36708 struct ata_taskfile *tf, int is_cmd, u16 flags,
36709 unsigned long timeout_msec)
36710 {
36711diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
36712index 23dac3b..89ada44 100644
36713--- a/drivers/ata/libata-core.c
36714+++ b/drivers/ata/libata-core.c
36715@@ -99,7 +99,7 @@ static unsigned int ata_dev_set_xfermode(struct ata_device *dev);
36716 static void ata_dev_xfermask(struct ata_device *dev);
36717 static unsigned long ata_dev_blacklisted(const struct ata_device *dev);
36718
36719-atomic_t ata_print_id = ATOMIC_INIT(0);
36720+atomic_unchecked_t ata_print_id = ATOMIC_INIT(0);
36721
36722 struct ata_force_param {
36723 const char *name;
36724@@ -4780,7 +4780,7 @@ void ata_qc_free(struct ata_queued_cmd *qc)
36725 struct ata_port *ap;
36726 unsigned int tag;
36727
36728- WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
36729+ BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
36730 ap = qc->ap;
36731
36732 qc->flags = 0;
36733@@ -4797,7 +4797,7 @@ void __ata_qc_complete(struct ata_queued_cmd *qc)
36734 struct ata_port *ap;
36735 struct ata_link *link;
36736
36737- WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
36738+ BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
36739 WARN_ON_ONCE(!(qc->flags & ATA_QCFLAG_ACTIVE));
36740 ap = qc->ap;
36741 link = qc->dev->link;
36742@@ -5901,6 +5901,7 @@ static void ata_finalize_port_ops(struct ata_port_operations *ops)
36743 return;
36744
36745 spin_lock(&lock);
36746+ pax_open_kernel();
36747
36748 for (cur = ops->inherits; cur; cur = cur->inherits) {
36749 void **inherit = (void **)cur;
36750@@ -5914,8 +5915,9 @@ static void ata_finalize_port_ops(struct ata_port_operations *ops)
36751 if (IS_ERR(*pp))
36752 *pp = NULL;
36753
36754- ops->inherits = NULL;
36755+ *(struct ata_port_operations **)&ops->inherits = NULL;
36756
36757+ pax_close_kernel();
36758 spin_unlock(&lock);
36759 }
36760
36761@@ -6111,7 +6113,7 @@ int ata_host_register(struct ata_host *host, struct scsi_host_template *sht)
36762
36763 /* give ports names and add SCSI hosts */
36764 for (i = 0; i < host->n_ports; i++) {
36765- host->ports[i]->print_id = atomic_inc_return(&ata_print_id);
36766+ host->ports[i]->print_id = atomic_inc_return_unchecked(&ata_print_id);
36767 host->ports[i]->local_port_no = i + 1;
36768 }
36769
36770diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
36771index b061ba2..fdcd85f 100644
36772--- a/drivers/ata/libata-scsi.c
36773+++ b/drivers/ata/libata-scsi.c
36774@@ -4172,7 +4172,7 @@ int ata_sas_port_init(struct ata_port *ap)
36775
36776 if (rc)
36777 return rc;
36778- ap->print_id = atomic_inc_return(&ata_print_id);
36779+ ap->print_id = atomic_inc_return_unchecked(&ata_print_id);
36780 return 0;
36781 }
36782 EXPORT_SYMBOL_GPL(ata_sas_port_init);
36783diff --git a/drivers/ata/libata.h b/drivers/ata/libata.h
36784index f840ca1..edd6ef3 100644
36785--- a/drivers/ata/libata.h
36786+++ b/drivers/ata/libata.h
36787@@ -53,7 +53,7 @@ enum {
36788 ATA_DNXFER_QUIET = (1 << 31),
36789 };
36790
36791-extern atomic_t ata_print_id;
36792+extern atomic_unchecked_t ata_print_id;
36793 extern int atapi_passthru16;
36794 extern int libata_fua;
36795 extern int libata_noacpi;
36796diff --git a/drivers/ata/pata_arasan_cf.c b/drivers/ata/pata_arasan_cf.c
36797index a9b0c82..207d97d 100644
36798--- a/drivers/ata/pata_arasan_cf.c
36799+++ b/drivers/ata/pata_arasan_cf.c
36800@@ -865,7 +865,9 @@ static int arasan_cf_probe(struct platform_device *pdev)
36801 /* Handle platform specific quirks */
36802 if (quirk) {
36803 if (quirk & CF_BROKEN_PIO) {
36804- ap->ops->set_piomode = NULL;
36805+ pax_open_kernel();
36806+ *(void **)&ap->ops->set_piomode = NULL;
36807+ pax_close_kernel();
36808 ap->pio_mask = 0;
36809 }
36810 if (quirk & CF_BROKEN_MWDMA)
36811diff --git a/drivers/atm/adummy.c b/drivers/atm/adummy.c
36812index f9b983a..887b9d8 100644
36813--- a/drivers/atm/adummy.c
36814+++ b/drivers/atm/adummy.c
36815@@ -114,7 +114,7 @@ adummy_send(struct atm_vcc *vcc, struct sk_buff *skb)
36816 vcc->pop(vcc, skb);
36817 else
36818 dev_kfree_skb_any(skb);
36819- atomic_inc(&vcc->stats->tx);
36820+ atomic_inc_unchecked(&vcc->stats->tx);
36821
36822 return 0;
36823 }
36824diff --git a/drivers/atm/ambassador.c b/drivers/atm/ambassador.c
36825index f1a9198..f466a4a 100644
36826--- a/drivers/atm/ambassador.c
36827+++ b/drivers/atm/ambassador.c
36828@@ -454,7 +454,7 @@ static void tx_complete (amb_dev * dev, tx_out * tx) {
36829 PRINTD (DBG_FLOW|DBG_TX, "tx_complete %p %p", dev, tx);
36830
36831 // VC layer stats
36832- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
36833+ atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
36834
36835 // free the descriptor
36836 kfree (tx_descr);
36837@@ -495,7 +495,7 @@ static void rx_complete (amb_dev * dev, rx_out * rx) {
36838 dump_skb ("<<<", vc, skb);
36839
36840 // VC layer stats
36841- atomic_inc(&atm_vcc->stats->rx);
36842+ atomic_inc_unchecked(&atm_vcc->stats->rx);
36843 __net_timestamp(skb);
36844 // end of our responsibility
36845 atm_vcc->push (atm_vcc, skb);
36846@@ -510,7 +510,7 @@ static void rx_complete (amb_dev * dev, rx_out * rx) {
36847 } else {
36848 PRINTK (KERN_INFO, "dropped over-size frame");
36849 // should we count this?
36850- atomic_inc(&atm_vcc->stats->rx_drop);
36851+ atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
36852 }
36853
36854 } else {
36855@@ -1338,7 +1338,7 @@ static int amb_send (struct atm_vcc * atm_vcc, struct sk_buff * skb) {
36856 }
36857
36858 if (check_area (skb->data, skb->len)) {
36859- atomic_inc(&atm_vcc->stats->tx_err);
36860+ atomic_inc_unchecked(&atm_vcc->stats->tx_err);
36861 return -ENOMEM; // ?
36862 }
36863
36864diff --git a/drivers/atm/atmtcp.c b/drivers/atm/atmtcp.c
36865index 480fa6f..947067c 100644
36866--- a/drivers/atm/atmtcp.c
36867+++ b/drivers/atm/atmtcp.c
36868@@ -206,7 +206,7 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
36869 if (vcc->pop) vcc->pop(vcc,skb);
36870 else dev_kfree_skb(skb);
36871 if (dev_data) return 0;
36872- atomic_inc(&vcc->stats->tx_err);
36873+ atomic_inc_unchecked(&vcc->stats->tx_err);
36874 return -ENOLINK;
36875 }
36876 size = skb->len+sizeof(struct atmtcp_hdr);
36877@@ -214,7 +214,7 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
36878 if (!new_skb) {
36879 if (vcc->pop) vcc->pop(vcc,skb);
36880 else dev_kfree_skb(skb);
36881- atomic_inc(&vcc->stats->tx_err);
36882+ atomic_inc_unchecked(&vcc->stats->tx_err);
36883 return -ENOBUFS;
36884 }
36885 hdr = (void *) skb_put(new_skb,sizeof(struct atmtcp_hdr));
36886@@ -225,8 +225,8 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
36887 if (vcc->pop) vcc->pop(vcc,skb);
36888 else dev_kfree_skb(skb);
36889 out_vcc->push(out_vcc,new_skb);
36890- atomic_inc(&vcc->stats->tx);
36891- atomic_inc(&out_vcc->stats->rx);
36892+ atomic_inc_unchecked(&vcc->stats->tx);
36893+ atomic_inc_unchecked(&out_vcc->stats->rx);
36894 return 0;
36895 }
36896
36897@@ -300,7 +300,7 @@ static int atmtcp_c_send(struct atm_vcc *vcc,struct sk_buff *skb)
36898 read_unlock(&vcc_sklist_lock);
36899 if (!out_vcc) {
36900 result = -EUNATCH;
36901- atomic_inc(&vcc->stats->tx_err);
36902+ atomic_inc_unchecked(&vcc->stats->tx_err);
36903 goto done;
36904 }
36905 skb_pull(skb,sizeof(struct atmtcp_hdr));
36906@@ -312,8 +312,8 @@ static int atmtcp_c_send(struct atm_vcc *vcc,struct sk_buff *skb)
36907 __net_timestamp(new_skb);
36908 skb_copy_from_linear_data(skb, skb_put(new_skb, skb->len), skb->len);
36909 out_vcc->push(out_vcc,new_skb);
36910- atomic_inc(&vcc->stats->tx);
36911- atomic_inc(&out_vcc->stats->rx);
36912+ atomic_inc_unchecked(&vcc->stats->tx);
36913+ atomic_inc_unchecked(&out_vcc->stats->rx);
36914 done:
36915 if (vcc->pop) vcc->pop(vcc,skb);
36916 else dev_kfree_skb(skb);
36917diff --git a/drivers/atm/eni.c b/drivers/atm/eni.c
36918index 6339efd..2b441d5 100644
36919--- a/drivers/atm/eni.c
36920+++ b/drivers/atm/eni.c
36921@@ -525,7 +525,7 @@ static int rx_aal0(struct atm_vcc *vcc)
36922 DPRINTK(DEV_LABEL "(itf %d): trashing empty cell\n",
36923 vcc->dev->number);
36924 length = 0;
36925- atomic_inc(&vcc->stats->rx_err);
36926+ atomic_inc_unchecked(&vcc->stats->rx_err);
36927 }
36928 else {
36929 length = ATM_CELL_SIZE-1; /* no HEC */
36930@@ -580,7 +580,7 @@ static int rx_aal5(struct atm_vcc *vcc)
36931 size);
36932 }
36933 eff = length = 0;
36934- atomic_inc(&vcc->stats->rx_err);
36935+ atomic_inc_unchecked(&vcc->stats->rx_err);
36936 }
36937 else {
36938 size = (descr & MID_RED_COUNT)*(ATM_CELL_PAYLOAD >> 2);
36939@@ -597,7 +597,7 @@ static int rx_aal5(struct atm_vcc *vcc)
36940 "(VCI=%d,length=%ld,size=%ld (descr 0x%lx))\n",
36941 vcc->dev->number,vcc->vci,length,size << 2,descr);
36942 length = eff = 0;
36943- atomic_inc(&vcc->stats->rx_err);
36944+ atomic_inc_unchecked(&vcc->stats->rx_err);
36945 }
36946 }
36947 skb = eff ? atm_alloc_charge(vcc,eff << 2,GFP_ATOMIC) : NULL;
36948@@ -770,7 +770,7 @@ rx_dequeued++;
36949 vcc->push(vcc,skb);
36950 pushed++;
36951 }
36952- atomic_inc(&vcc->stats->rx);
36953+ atomic_inc_unchecked(&vcc->stats->rx);
36954 }
36955 wake_up(&eni_dev->rx_wait);
36956 }
36957@@ -1230,7 +1230,7 @@ static void dequeue_tx(struct atm_dev *dev)
36958 DMA_TO_DEVICE);
36959 if (vcc->pop) vcc->pop(vcc,skb);
36960 else dev_kfree_skb_irq(skb);
36961- atomic_inc(&vcc->stats->tx);
36962+ atomic_inc_unchecked(&vcc->stats->tx);
36963 wake_up(&eni_dev->tx_wait);
36964 dma_complete++;
36965 }
36966diff --git a/drivers/atm/firestream.c b/drivers/atm/firestream.c
36967index 82f2ae0..f205c02 100644
36968--- a/drivers/atm/firestream.c
36969+++ b/drivers/atm/firestream.c
36970@@ -749,7 +749,7 @@ static void process_txdone_queue (struct fs_dev *dev, struct queue *q)
36971 }
36972 }
36973
36974- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
36975+ atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
36976
36977 fs_dprintk (FS_DEBUG_TXMEM, "i");
36978 fs_dprintk (FS_DEBUG_ALLOC, "Free t-skb: %p\n", skb);
36979@@ -816,7 +816,7 @@ static void process_incoming (struct fs_dev *dev, struct queue *q)
36980 #endif
36981 skb_put (skb, qe->p1 & 0xffff);
36982 ATM_SKB(skb)->vcc = atm_vcc;
36983- atomic_inc(&atm_vcc->stats->rx);
36984+ atomic_inc_unchecked(&atm_vcc->stats->rx);
36985 __net_timestamp(skb);
36986 fs_dprintk (FS_DEBUG_ALLOC, "Free rec-skb: %p (pushed)\n", skb);
36987 atm_vcc->push (atm_vcc, skb);
36988@@ -837,12 +837,12 @@ static void process_incoming (struct fs_dev *dev, struct queue *q)
36989 kfree (pe);
36990 }
36991 if (atm_vcc)
36992- atomic_inc(&atm_vcc->stats->rx_drop);
36993+ atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
36994 break;
36995 case 0x1f: /* Reassembly abort: no buffers. */
36996 /* Silently increment error counter. */
36997 if (atm_vcc)
36998- atomic_inc(&atm_vcc->stats->rx_drop);
36999+ atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
37000 break;
37001 default: /* Hmm. Haven't written the code to handle the others yet... -- REW */
37002 printk (KERN_WARNING "Don't know what to do with RX status %x: %s.\n",
37003diff --git a/drivers/atm/fore200e.c b/drivers/atm/fore200e.c
37004index 75dde90..4309ead 100644
37005--- a/drivers/atm/fore200e.c
37006+++ b/drivers/atm/fore200e.c
37007@@ -932,9 +932,9 @@ fore200e_tx_irq(struct fore200e* fore200e)
37008 #endif
37009 /* check error condition */
37010 if (*entry->status & STATUS_ERROR)
37011- atomic_inc(&vcc->stats->tx_err);
37012+ atomic_inc_unchecked(&vcc->stats->tx_err);
37013 else
37014- atomic_inc(&vcc->stats->tx);
37015+ atomic_inc_unchecked(&vcc->stats->tx);
37016 }
37017 }
37018
37019@@ -1083,7 +1083,7 @@ fore200e_push_rpd(struct fore200e* fore200e, struct atm_vcc* vcc, struct rpd* rp
37020 if (skb == NULL) {
37021 DPRINTK(2, "unable to alloc new skb, rx PDU length = %d\n", pdu_len);
37022
37023- atomic_inc(&vcc->stats->rx_drop);
37024+ atomic_inc_unchecked(&vcc->stats->rx_drop);
37025 return -ENOMEM;
37026 }
37027
37028@@ -1126,14 +1126,14 @@ fore200e_push_rpd(struct fore200e* fore200e, struct atm_vcc* vcc, struct rpd* rp
37029
37030 dev_kfree_skb_any(skb);
37031
37032- atomic_inc(&vcc->stats->rx_drop);
37033+ atomic_inc_unchecked(&vcc->stats->rx_drop);
37034 return -ENOMEM;
37035 }
37036
37037 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
37038
37039 vcc->push(vcc, skb);
37040- atomic_inc(&vcc->stats->rx);
37041+ atomic_inc_unchecked(&vcc->stats->rx);
37042
37043 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
37044
37045@@ -1211,7 +1211,7 @@ fore200e_rx_irq(struct fore200e* fore200e)
37046 DPRINTK(2, "damaged PDU on %d.%d.%d\n",
37047 fore200e->atm_dev->number,
37048 entry->rpd->atm_header.vpi, entry->rpd->atm_header.vci);
37049- atomic_inc(&vcc->stats->rx_err);
37050+ atomic_inc_unchecked(&vcc->stats->rx_err);
37051 }
37052 }
37053
37054@@ -1656,7 +1656,7 @@ fore200e_send(struct atm_vcc *vcc, struct sk_buff *skb)
37055 goto retry_here;
37056 }
37057
37058- atomic_inc(&vcc->stats->tx_err);
37059+ atomic_inc_unchecked(&vcc->stats->tx_err);
37060
37061 fore200e->tx_sat++;
37062 DPRINTK(2, "tx queue of device %s is saturated, PDU dropped - heartbeat is %08x\n",
37063diff --git a/drivers/atm/he.c b/drivers/atm/he.c
37064index 93dca2e..c5daa69 100644
37065--- a/drivers/atm/he.c
37066+++ b/drivers/atm/he.c
37067@@ -1692,7 +1692,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
37068
37069 if (RBRQ_HBUF_ERR(he_dev->rbrq_head)) {
37070 hprintk("HBUF_ERR! (cid 0x%x)\n", cid);
37071- atomic_inc(&vcc->stats->rx_drop);
37072+ atomic_inc_unchecked(&vcc->stats->rx_drop);
37073 goto return_host_buffers;
37074 }
37075
37076@@ -1719,7 +1719,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
37077 RBRQ_LEN_ERR(he_dev->rbrq_head)
37078 ? "LEN_ERR" : "",
37079 vcc->vpi, vcc->vci);
37080- atomic_inc(&vcc->stats->rx_err);
37081+ atomic_inc_unchecked(&vcc->stats->rx_err);
37082 goto return_host_buffers;
37083 }
37084
37085@@ -1771,7 +1771,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
37086 vcc->push(vcc, skb);
37087 spin_lock(&he_dev->global_lock);
37088
37089- atomic_inc(&vcc->stats->rx);
37090+ atomic_inc_unchecked(&vcc->stats->rx);
37091
37092 return_host_buffers:
37093 ++pdus_assembled;
37094@@ -2097,7 +2097,7 @@ __enqueue_tpd(struct he_dev *he_dev, struct he_tpd *tpd, unsigned cid)
37095 tpd->vcc->pop(tpd->vcc, tpd->skb);
37096 else
37097 dev_kfree_skb_any(tpd->skb);
37098- atomic_inc(&tpd->vcc->stats->tx_err);
37099+ atomic_inc_unchecked(&tpd->vcc->stats->tx_err);
37100 }
37101 dma_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status));
37102 return;
37103@@ -2509,7 +2509,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
37104 vcc->pop(vcc, skb);
37105 else
37106 dev_kfree_skb_any(skb);
37107- atomic_inc(&vcc->stats->tx_err);
37108+ atomic_inc_unchecked(&vcc->stats->tx_err);
37109 return -EINVAL;
37110 }
37111
37112@@ -2520,7 +2520,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
37113 vcc->pop(vcc, skb);
37114 else
37115 dev_kfree_skb_any(skb);
37116- atomic_inc(&vcc->stats->tx_err);
37117+ atomic_inc_unchecked(&vcc->stats->tx_err);
37118 return -EINVAL;
37119 }
37120 #endif
37121@@ -2532,7 +2532,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
37122 vcc->pop(vcc, skb);
37123 else
37124 dev_kfree_skb_any(skb);
37125- atomic_inc(&vcc->stats->tx_err);
37126+ atomic_inc_unchecked(&vcc->stats->tx_err);
37127 spin_unlock_irqrestore(&he_dev->global_lock, flags);
37128 return -ENOMEM;
37129 }
37130@@ -2574,7 +2574,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
37131 vcc->pop(vcc, skb);
37132 else
37133 dev_kfree_skb_any(skb);
37134- atomic_inc(&vcc->stats->tx_err);
37135+ atomic_inc_unchecked(&vcc->stats->tx_err);
37136 spin_unlock_irqrestore(&he_dev->global_lock, flags);
37137 return -ENOMEM;
37138 }
37139@@ -2605,7 +2605,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
37140 __enqueue_tpd(he_dev, tpd, cid);
37141 spin_unlock_irqrestore(&he_dev->global_lock, flags);
37142
37143- atomic_inc(&vcc->stats->tx);
37144+ atomic_inc_unchecked(&vcc->stats->tx);
37145
37146 return 0;
37147 }
37148diff --git a/drivers/atm/horizon.c b/drivers/atm/horizon.c
37149index 527bbd5..96570c8 100644
37150--- a/drivers/atm/horizon.c
37151+++ b/drivers/atm/horizon.c
37152@@ -1018,7 +1018,7 @@ static void rx_schedule (hrz_dev * dev, int irq) {
37153 {
37154 struct atm_vcc * vcc = ATM_SKB(skb)->vcc;
37155 // VC layer stats
37156- atomic_inc(&vcc->stats->rx);
37157+ atomic_inc_unchecked(&vcc->stats->rx);
37158 __net_timestamp(skb);
37159 // end of our responsibility
37160 vcc->push (vcc, skb);
37161@@ -1170,7 +1170,7 @@ static void tx_schedule (hrz_dev * const dev, int irq) {
37162 dev->tx_iovec = NULL;
37163
37164 // VC layer stats
37165- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
37166+ atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
37167
37168 // free the skb
37169 hrz_kfree_skb (skb);
37170diff --git a/drivers/atm/idt77252.c b/drivers/atm/idt77252.c
37171index 074616b..d6b3d5f 100644
37172--- a/drivers/atm/idt77252.c
37173+++ b/drivers/atm/idt77252.c
37174@@ -811,7 +811,7 @@ drain_scq(struct idt77252_dev *card, struct vc_map *vc)
37175 else
37176 dev_kfree_skb(skb);
37177
37178- atomic_inc(&vcc->stats->tx);
37179+ atomic_inc_unchecked(&vcc->stats->tx);
37180 }
37181
37182 atomic_dec(&scq->used);
37183@@ -1073,13 +1073,13 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
37184 if ((sb = dev_alloc_skb(64)) == NULL) {
37185 printk("%s: Can't allocate buffers for aal0.\n",
37186 card->name);
37187- atomic_add(i, &vcc->stats->rx_drop);
37188+ atomic_add_unchecked(i, &vcc->stats->rx_drop);
37189 break;
37190 }
37191 if (!atm_charge(vcc, sb->truesize)) {
37192 RXPRINTK("%s: atm_charge() dropped aal0 packets.\n",
37193 card->name);
37194- atomic_add(i - 1, &vcc->stats->rx_drop);
37195+ atomic_add_unchecked(i - 1, &vcc->stats->rx_drop);
37196 dev_kfree_skb(sb);
37197 break;
37198 }
37199@@ -1096,7 +1096,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
37200 ATM_SKB(sb)->vcc = vcc;
37201 __net_timestamp(sb);
37202 vcc->push(vcc, sb);
37203- atomic_inc(&vcc->stats->rx);
37204+ atomic_inc_unchecked(&vcc->stats->rx);
37205
37206 cell += ATM_CELL_PAYLOAD;
37207 }
37208@@ -1133,13 +1133,13 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
37209 "(CDC: %08x)\n",
37210 card->name, len, rpp->len, readl(SAR_REG_CDC));
37211 recycle_rx_pool_skb(card, rpp);
37212- atomic_inc(&vcc->stats->rx_err);
37213+ atomic_inc_unchecked(&vcc->stats->rx_err);
37214 return;
37215 }
37216 if (stat & SAR_RSQE_CRC) {
37217 RXPRINTK("%s: AAL5 CRC error.\n", card->name);
37218 recycle_rx_pool_skb(card, rpp);
37219- atomic_inc(&vcc->stats->rx_err);
37220+ atomic_inc_unchecked(&vcc->stats->rx_err);
37221 return;
37222 }
37223 if (skb_queue_len(&rpp->queue) > 1) {
37224@@ -1150,7 +1150,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
37225 RXPRINTK("%s: Can't alloc RX skb.\n",
37226 card->name);
37227 recycle_rx_pool_skb(card, rpp);
37228- atomic_inc(&vcc->stats->rx_err);
37229+ atomic_inc_unchecked(&vcc->stats->rx_err);
37230 return;
37231 }
37232 if (!atm_charge(vcc, skb->truesize)) {
37233@@ -1169,7 +1169,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
37234 __net_timestamp(skb);
37235
37236 vcc->push(vcc, skb);
37237- atomic_inc(&vcc->stats->rx);
37238+ atomic_inc_unchecked(&vcc->stats->rx);
37239
37240 return;
37241 }
37242@@ -1191,7 +1191,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
37243 __net_timestamp(skb);
37244
37245 vcc->push(vcc, skb);
37246- atomic_inc(&vcc->stats->rx);
37247+ atomic_inc_unchecked(&vcc->stats->rx);
37248
37249 if (skb->truesize > SAR_FB_SIZE_3)
37250 add_rx_skb(card, 3, SAR_FB_SIZE_3, 1);
37251@@ -1302,14 +1302,14 @@ idt77252_rx_raw(struct idt77252_dev *card)
37252 if (vcc->qos.aal != ATM_AAL0) {
37253 RPRINTK("%s: raw cell for non AAL0 vc %u.%u\n",
37254 card->name, vpi, vci);
37255- atomic_inc(&vcc->stats->rx_drop);
37256+ atomic_inc_unchecked(&vcc->stats->rx_drop);
37257 goto drop;
37258 }
37259
37260 if ((sb = dev_alloc_skb(64)) == NULL) {
37261 printk("%s: Can't allocate buffers for AAL0.\n",
37262 card->name);
37263- atomic_inc(&vcc->stats->rx_err);
37264+ atomic_inc_unchecked(&vcc->stats->rx_err);
37265 goto drop;
37266 }
37267
37268@@ -1328,7 +1328,7 @@ idt77252_rx_raw(struct idt77252_dev *card)
37269 ATM_SKB(sb)->vcc = vcc;
37270 __net_timestamp(sb);
37271 vcc->push(vcc, sb);
37272- atomic_inc(&vcc->stats->rx);
37273+ atomic_inc_unchecked(&vcc->stats->rx);
37274
37275 drop:
37276 skb_pull(queue, 64);
37277@@ -1953,13 +1953,13 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
37278
37279 if (vc == NULL) {
37280 printk("%s: NULL connection in send().\n", card->name);
37281- atomic_inc(&vcc->stats->tx_err);
37282+ atomic_inc_unchecked(&vcc->stats->tx_err);
37283 dev_kfree_skb(skb);
37284 return -EINVAL;
37285 }
37286 if (!test_bit(VCF_TX, &vc->flags)) {
37287 printk("%s: Trying to transmit on a non-tx VC.\n", card->name);
37288- atomic_inc(&vcc->stats->tx_err);
37289+ atomic_inc_unchecked(&vcc->stats->tx_err);
37290 dev_kfree_skb(skb);
37291 return -EINVAL;
37292 }
37293@@ -1971,14 +1971,14 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
37294 break;
37295 default:
37296 printk("%s: Unsupported AAL: %d\n", card->name, vcc->qos.aal);
37297- atomic_inc(&vcc->stats->tx_err);
37298+ atomic_inc_unchecked(&vcc->stats->tx_err);
37299 dev_kfree_skb(skb);
37300 return -EINVAL;
37301 }
37302
37303 if (skb_shinfo(skb)->nr_frags != 0) {
37304 printk("%s: No scatter-gather yet.\n", card->name);
37305- atomic_inc(&vcc->stats->tx_err);
37306+ atomic_inc_unchecked(&vcc->stats->tx_err);
37307 dev_kfree_skb(skb);
37308 return -EINVAL;
37309 }
37310@@ -1986,7 +1986,7 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
37311
37312 err = queue_skb(card, vc, skb, oam);
37313 if (err) {
37314- atomic_inc(&vcc->stats->tx_err);
37315+ atomic_inc_unchecked(&vcc->stats->tx_err);
37316 dev_kfree_skb(skb);
37317 return err;
37318 }
37319@@ -2009,7 +2009,7 @@ idt77252_send_oam(struct atm_vcc *vcc, void *cell, int flags)
37320 skb = dev_alloc_skb(64);
37321 if (!skb) {
37322 printk("%s: Out of memory in send_oam().\n", card->name);
37323- atomic_inc(&vcc->stats->tx_err);
37324+ atomic_inc_unchecked(&vcc->stats->tx_err);
37325 return -ENOMEM;
37326 }
37327 atomic_add(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc);
37328diff --git a/drivers/atm/iphase.c b/drivers/atm/iphase.c
37329index 924f8e2..3375a3e 100644
37330--- a/drivers/atm/iphase.c
37331+++ b/drivers/atm/iphase.c
37332@@ -1145,7 +1145,7 @@ static int rx_pkt(struct atm_dev *dev)
37333 status = (u_short) (buf_desc_ptr->desc_mode);
37334 if (status & (RX_CER | RX_PTE | RX_OFL))
37335 {
37336- atomic_inc(&vcc->stats->rx_err);
37337+ atomic_inc_unchecked(&vcc->stats->rx_err);
37338 IF_ERR(printk("IA: bad packet, dropping it");)
37339 if (status & RX_CER) {
37340 IF_ERR(printk(" cause: packet CRC error\n");)
37341@@ -1168,7 +1168,7 @@ static int rx_pkt(struct atm_dev *dev)
37342 len = dma_addr - buf_addr;
37343 if (len > iadev->rx_buf_sz) {
37344 printk("Over %d bytes sdu received, dropped!!!\n", iadev->rx_buf_sz);
37345- atomic_inc(&vcc->stats->rx_err);
37346+ atomic_inc_unchecked(&vcc->stats->rx_err);
37347 goto out_free_desc;
37348 }
37349
37350@@ -1318,7 +1318,7 @@ static void rx_dle_intr(struct atm_dev *dev)
37351 ia_vcc = INPH_IA_VCC(vcc);
37352 if (ia_vcc == NULL)
37353 {
37354- atomic_inc(&vcc->stats->rx_err);
37355+ atomic_inc_unchecked(&vcc->stats->rx_err);
37356 atm_return(vcc, skb->truesize);
37357 dev_kfree_skb_any(skb);
37358 goto INCR_DLE;
37359@@ -1330,7 +1330,7 @@ static void rx_dle_intr(struct atm_dev *dev)
37360 if ((length > iadev->rx_buf_sz) || (length >
37361 (skb->len - sizeof(struct cpcs_trailer))))
37362 {
37363- atomic_inc(&vcc->stats->rx_err);
37364+ atomic_inc_unchecked(&vcc->stats->rx_err);
37365 IF_ERR(printk("rx_dle_intr: Bad AAL5 trailer %d (skb len %d)",
37366 length, skb->len);)
37367 atm_return(vcc, skb->truesize);
37368@@ -1346,7 +1346,7 @@ static void rx_dle_intr(struct atm_dev *dev)
37369
37370 IF_RX(printk("rx_dle_intr: skb push");)
37371 vcc->push(vcc,skb);
37372- atomic_inc(&vcc->stats->rx);
37373+ atomic_inc_unchecked(&vcc->stats->rx);
37374 iadev->rx_pkt_cnt++;
37375 }
37376 INCR_DLE:
37377@@ -2828,15 +2828,15 @@ static int ia_ioctl(struct atm_dev *dev, unsigned int cmd, void __user *arg)
37378 {
37379 struct k_sonet_stats *stats;
37380 stats = &PRIV(_ia_dev[board])->sonet_stats;
37381- printk("section_bip: %d\n", atomic_read(&stats->section_bip));
37382- printk("line_bip : %d\n", atomic_read(&stats->line_bip));
37383- printk("path_bip : %d\n", atomic_read(&stats->path_bip));
37384- printk("line_febe : %d\n", atomic_read(&stats->line_febe));
37385- printk("path_febe : %d\n", atomic_read(&stats->path_febe));
37386- printk("corr_hcs : %d\n", atomic_read(&stats->corr_hcs));
37387- printk("uncorr_hcs : %d\n", atomic_read(&stats->uncorr_hcs));
37388- printk("tx_cells : %d\n", atomic_read(&stats->tx_cells));
37389- printk("rx_cells : %d\n", atomic_read(&stats->rx_cells));
37390+ printk("section_bip: %d\n", atomic_read_unchecked(&stats->section_bip));
37391+ printk("line_bip : %d\n", atomic_read_unchecked(&stats->line_bip));
37392+ printk("path_bip : %d\n", atomic_read_unchecked(&stats->path_bip));
37393+ printk("line_febe : %d\n", atomic_read_unchecked(&stats->line_febe));
37394+ printk("path_febe : %d\n", atomic_read_unchecked(&stats->path_febe));
37395+ printk("corr_hcs : %d\n", atomic_read_unchecked(&stats->corr_hcs));
37396+ printk("uncorr_hcs : %d\n", atomic_read_unchecked(&stats->uncorr_hcs));
37397+ printk("tx_cells : %d\n", atomic_read_unchecked(&stats->tx_cells));
37398+ printk("rx_cells : %d\n", atomic_read_unchecked(&stats->rx_cells));
37399 }
37400 ia_cmds.status = 0;
37401 break;
37402@@ -2941,7 +2941,7 @@ static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb) {
37403 if ((desc == 0) || (desc > iadev->num_tx_desc))
37404 {
37405 IF_ERR(printk(DEV_LABEL "invalid desc for send: %d\n", desc);)
37406- atomic_inc(&vcc->stats->tx);
37407+ atomic_inc_unchecked(&vcc->stats->tx);
37408 if (vcc->pop)
37409 vcc->pop(vcc, skb);
37410 else
37411@@ -3046,14 +3046,14 @@ static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb) {
37412 ATM_DESC(skb) = vcc->vci;
37413 skb_queue_tail(&iadev->tx_dma_q, skb);
37414
37415- atomic_inc(&vcc->stats->tx);
37416+ atomic_inc_unchecked(&vcc->stats->tx);
37417 iadev->tx_pkt_cnt++;
37418 /* Increment transaction counter */
37419 writel(2, iadev->dma+IPHASE5575_TX_COUNTER);
37420
37421 #if 0
37422 /* add flow control logic */
37423- if (atomic_read(&vcc->stats->tx) % 20 == 0) {
37424+ if (atomic_read_unchecked(&vcc->stats->tx) % 20 == 0) {
37425 if (iavcc->vc_desc_cnt > 10) {
37426 vcc->tx_quota = vcc->tx_quota * 3 / 4;
37427 printk("Tx1: vcc->tx_quota = %d \n", (u32)vcc->tx_quota );
37428diff --git a/drivers/atm/lanai.c b/drivers/atm/lanai.c
37429index ce43ae3..969de38 100644
37430--- a/drivers/atm/lanai.c
37431+++ b/drivers/atm/lanai.c
37432@@ -1295,7 +1295,7 @@ static void lanai_send_one_aal5(struct lanai_dev *lanai,
37433 vcc_tx_add_aal5_trailer(lvcc, skb->len, 0, 0);
37434 lanai_endtx(lanai, lvcc);
37435 lanai_free_skb(lvcc->tx.atmvcc, skb);
37436- atomic_inc(&lvcc->tx.atmvcc->stats->tx);
37437+ atomic_inc_unchecked(&lvcc->tx.atmvcc->stats->tx);
37438 }
37439
37440 /* Try to fill the buffer - don't call unless there is backlog */
37441@@ -1418,7 +1418,7 @@ static void vcc_rx_aal5(struct lanai_vcc *lvcc, int endptr)
37442 ATM_SKB(skb)->vcc = lvcc->rx.atmvcc;
37443 __net_timestamp(skb);
37444 lvcc->rx.atmvcc->push(lvcc->rx.atmvcc, skb);
37445- atomic_inc(&lvcc->rx.atmvcc->stats->rx);
37446+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx);
37447 out:
37448 lvcc->rx.buf.ptr = end;
37449 cardvcc_write(lvcc, endptr, vcc_rxreadptr);
37450@@ -1659,7 +1659,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
37451 DPRINTK("(itf %d) got RX service entry 0x%X for non-AAL5 "
37452 "vcc %d\n", lanai->number, (unsigned int) s, vci);
37453 lanai->stats.service_rxnotaal5++;
37454- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
37455+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
37456 return 0;
37457 }
37458 if (likely(!(s & (SERVICE_TRASH | SERVICE_STREAM | SERVICE_CRCERR)))) {
37459@@ -1671,7 +1671,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
37460 int bytes;
37461 read_unlock(&vcc_sklist_lock);
37462 DPRINTK("got trashed rx pdu on vci %d\n", vci);
37463- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
37464+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
37465 lvcc->stats.x.aal5.service_trash++;
37466 bytes = (SERVICE_GET_END(s) * 16) -
37467 (((unsigned long) lvcc->rx.buf.ptr) -
37468@@ -1683,7 +1683,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
37469 }
37470 if (s & SERVICE_STREAM) {
37471 read_unlock(&vcc_sklist_lock);
37472- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
37473+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
37474 lvcc->stats.x.aal5.service_stream++;
37475 printk(KERN_ERR DEV_LABEL "(itf %d): Got AAL5 stream "
37476 "PDU on VCI %d!\n", lanai->number, vci);
37477@@ -1691,7 +1691,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
37478 return 0;
37479 }
37480 DPRINTK("got rx crc error on vci %d\n", vci);
37481- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
37482+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
37483 lvcc->stats.x.aal5.service_rxcrc++;
37484 lvcc->rx.buf.ptr = &lvcc->rx.buf.start[SERVICE_GET_END(s) * 4];
37485 cardvcc_write(lvcc, SERVICE_GET_END(s), vcc_rxreadptr);
37486diff --git a/drivers/atm/nicstar.c b/drivers/atm/nicstar.c
37487index b7e1cc0..eb336bfe 100644
37488--- a/drivers/atm/nicstar.c
37489+++ b/drivers/atm/nicstar.c
37490@@ -1640,7 +1640,7 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
37491 if ((vc = (vc_map *) vcc->dev_data) == NULL) {
37492 printk("nicstar%d: vcc->dev_data == NULL on ns_send().\n",
37493 card->index);
37494- atomic_inc(&vcc->stats->tx_err);
37495+ atomic_inc_unchecked(&vcc->stats->tx_err);
37496 dev_kfree_skb_any(skb);
37497 return -EINVAL;
37498 }
37499@@ -1648,7 +1648,7 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
37500 if (!vc->tx) {
37501 printk("nicstar%d: Trying to transmit on a non-tx VC.\n",
37502 card->index);
37503- atomic_inc(&vcc->stats->tx_err);
37504+ atomic_inc_unchecked(&vcc->stats->tx_err);
37505 dev_kfree_skb_any(skb);
37506 return -EINVAL;
37507 }
37508@@ -1656,14 +1656,14 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
37509 if (vcc->qos.aal != ATM_AAL5 && vcc->qos.aal != ATM_AAL0) {
37510 printk("nicstar%d: Only AAL0 and AAL5 are supported.\n",
37511 card->index);
37512- atomic_inc(&vcc->stats->tx_err);
37513+ atomic_inc_unchecked(&vcc->stats->tx_err);
37514 dev_kfree_skb_any(skb);
37515 return -EINVAL;
37516 }
37517
37518 if (skb_shinfo(skb)->nr_frags != 0) {
37519 printk("nicstar%d: No scatter-gather yet.\n", card->index);
37520- atomic_inc(&vcc->stats->tx_err);
37521+ atomic_inc_unchecked(&vcc->stats->tx_err);
37522 dev_kfree_skb_any(skb);
37523 return -EINVAL;
37524 }
37525@@ -1711,11 +1711,11 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
37526 }
37527
37528 if (push_scqe(card, vc, scq, &scqe, skb) != 0) {
37529- atomic_inc(&vcc->stats->tx_err);
37530+ atomic_inc_unchecked(&vcc->stats->tx_err);
37531 dev_kfree_skb_any(skb);
37532 return -EIO;
37533 }
37534- atomic_inc(&vcc->stats->tx);
37535+ atomic_inc_unchecked(&vcc->stats->tx);
37536
37537 return 0;
37538 }
37539@@ -2032,14 +2032,14 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
37540 printk
37541 ("nicstar%d: Can't allocate buffers for aal0.\n",
37542 card->index);
37543- atomic_add(i, &vcc->stats->rx_drop);
37544+ atomic_add_unchecked(i, &vcc->stats->rx_drop);
37545 break;
37546 }
37547 if (!atm_charge(vcc, sb->truesize)) {
37548 RXPRINTK
37549 ("nicstar%d: atm_charge() dropped aal0 packets.\n",
37550 card->index);
37551- atomic_add(i - 1, &vcc->stats->rx_drop); /* already increased by 1 */
37552+ atomic_add_unchecked(i - 1, &vcc->stats->rx_drop); /* already increased by 1 */
37553 dev_kfree_skb_any(sb);
37554 break;
37555 }
37556@@ -2054,7 +2054,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
37557 ATM_SKB(sb)->vcc = vcc;
37558 __net_timestamp(sb);
37559 vcc->push(vcc, sb);
37560- atomic_inc(&vcc->stats->rx);
37561+ atomic_inc_unchecked(&vcc->stats->rx);
37562 cell += ATM_CELL_PAYLOAD;
37563 }
37564
37565@@ -2071,7 +2071,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
37566 if (iovb == NULL) {
37567 printk("nicstar%d: Out of iovec buffers.\n",
37568 card->index);
37569- atomic_inc(&vcc->stats->rx_drop);
37570+ atomic_inc_unchecked(&vcc->stats->rx_drop);
37571 recycle_rx_buf(card, skb);
37572 return;
37573 }
37574@@ -2095,7 +2095,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
37575 small or large buffer itself. */
37576 } else if (NS_PRV_IOVCNT(iovb) >= NS_MAX_IOVECS) {
37577 printk("nicstar%d: received too big AAL5 SDU.\n", card->index);
37578- atomic_inc(&vcc->stats->rx_err);
37579+ atomic_inc_unchecked(&vcc->stats->rx_err);
37580 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
37581 NS_MAX_IOVECS);
37582 NS_PRV_IOVCNT(iovb) = 0;
37583@@ -2115,7 +2115,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
37584 ("nicstar%d: Expected a small buffer, and this is not one.\n",
37585 card->index);
37586 which_list(card, skb);
37587- atomic_inc(&vcc->stats->rx_err);
37588+ atomic_inc_unchecked(&vcc->stats->rx_err);
37589 recycle_rx_buf(card, skb);
37590 vc->rx_iov = NULL;
37591 recycle_iov_buf(card, iovb);
37592@@ -2128,7 +2128,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
37593 ("nicstar%d: Expected a large buffer, and this is not one.\n",
37594 card->index);
37595 which_list(card, skb);
37596- atomic_inc(&vcc->stats->rx_err);
37597+ atomic_inc_unchecked(&vcc->stats->rx_err);
37598 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
37599 NS_PRV_IOVCNT(iovb));
37600 vc->rx_iov = NULL;
37601@@ -2151,7 +2151,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
37602 printk(" - PDU size mismatch.\n");
37603 else
37604 printk(".\n");
37605- atomic_inc(&vcc->stats->rx_err);
37606+ atomic_inc_unchecked(&vcc->stats->rx_err);
37607 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
37608 NS_PRV_IOVCNT(iovb));
37609 vc->rx_iov = NULL;
37610@@ -2165,7 +2165,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
37611 /* skb points to a small buffer */
37612 if (!atm_charge(vcc, skb->truesize)) {
37613 push_rxbufs(card, skb);
37614- atomic_inc(&vcc->stats->rx_drop);
37615+ atomic_inc_unchecked(&vcc->stats->rx_drop);
37616 } else {
37617 skb_put(skb, len);
37618 dequeue_sm_buf(card, skb);
37619@@ -2175,7 +2175,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
37620 ATM_SKB(skb)->vcc = vcc;
37621 __net_timestamp(skb);
37622 vcc->push(vcc, skb);
37623- atomic_inc(&vcc->stats->rx);
37624+ atomic_inc_unchecked(&vcc->stats->rx);
37625 }
37626 } else if (NS_PRV_IOVCNT(iovb) == 2) { /* One small plus one large buffer */
37627 struct sk_buff *sb;
37628@@ -2186,7 +2186,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
37629 if (len <= NS_SMBUFSIZE) {
37630 if (!atm_charge(vcc, sb->truesize)) {
37631 push_rxbufs(card, sb);
37632- atomic_inc(&vcc->stats->rx_drop);
37633+ atomic_inc_unchecked(&vcc->stats->rx_drop);
37634 } else {
37635 skb_put(sb, len);
37636 dequeue_sm_buf(card, sb);
37637@@ -2196,7 +2196,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
37638 ATM_SKB(sb)->vcc = vcc;
37639 __net_timestamp(sb);
37640 vcc->push(vcc, sb);
37641- atomic_inc(&vcc->stats->rx);
37642+ atomic_inc_unchecked(&vcc->stats->rx);
37643 }
37644
37645 push_rxbufs(card, skb);
37646@@ -2205,7 +2205,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
37647
37648 if (!atm_charge(vcc, skb->truesize)) {
37649 push_rxbufs(card, skb);
37650- atomic_inc(&vcc->stats->rx_drop);
37651+ atomic_inc_unchecked(&vcc->stats->rx_drop);
37652 } else {
37653 dequeue_lg_buf(card, skb);
37654 #ifdef NS_USE_DESTRUCTORS
37655@@ -2218,7 +2218,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
37656 ATM_SKB(skb)->vcc = vcc;
37657 __net_timestamp(skb);
37658 vcc->push(vcc, skb);
37659- atomic_inc(&vcc->stats->rx);
37660+ atomic_inc_unchecked(&vcc->stats->rx);
37661 }
37662
37663 push_rxbufs(card, sb);
37664@@ -2239,7 +2239,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
37665 printk
37666 ("nicstar%d: Out of huge buffers.\n",
37667 card->index);
37668- atomic_inc(&vcc->stats->rx_drop);
37669+ atomic_inc_unchecked(&vcc->stats->rx_drop);
37670 recycle_iovec_rx_bufs(card,
37671 (struct iovec *)
37672 iovb->data,
37673@@ -2290,7 +2290,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
37674 card->hbpool.count++;
37675 } else
37676 dev_kfree_skb_any(hb);
37677- atomic_inc(&vcc->stats->rx_drop);
37678+ atomic_inc_unchecked(&vcc->stats->rx_drop);
37679 } else {
37680 /* Copy the small buffer to the huge buffer */
37681 sb = (struct sk_buff *)iov->iov_base;
37682@@ -2327,7 +2327,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
37683 #endif /* NS_USE_DESTRUCTORS */
37684 __net_timestamp(hb);
37685 vcc->push(vcc, hb);
37686- atomic_inc(&vcc->stats->rx);
37687+ atomic_inc_unchecked(&vcc->stats->rx);
37688 }
37689 }
37690
37691diff --git a/drivers/atm/solos-pci.c b/drivers/atm/solos-pci.c
37692index 74e18b0..f16afa0 100644
37693--- a/drivers/atm/solos-pci.c
37694+++ b/drivers/atm/solos-pci.c
37695@@ -838,7 +838,7 @@ static void solos_bh(unsigned long card_arg)
37696 }
37697 atm_charge(vcc, skb->truesize);
37698 vcc->push(vcc, skb);
37699- atomic_inc(&vcc->stats->rx);
37700+ atomic_inc_unchecked(&vcc->stats->rx);
37701 break;
37702
37703 case PKT_STATUS:
37704@@ -1116,7 +1116,7 @@ static uint32_t fpga_tx(struct solos_card *card)
37705 vcc = SKB_CB(oldskb)->vcc;
37706
37707 if (vcc) {
37708- atomic_inc(&vcc->stats->tx);
37709+ atomic_inc_unchecked(&vcc->stats->tx);
37710 solos_pop(vcc, oldskb);
37711 } else {
37712 dev_kfree_skb_irq(oldskb);
37713diff --git a/drivers/atm/suni.c b/drivers/atm/suni.c
37714index 0215934..ce9f5b1 100644
37715--- a/drivers/atm/suni.c
37716+++ b/drivers/atm/suni.c
37717@@ -49,8 +49,8 @@ static DEFINE_SPINLOCK(sunis_lock);
37718
37719
37720 #define ADD_LIMITED(s,v) \
37721- atomic_add((v),&stats->s); \
37722- if (atomic_read(&stats->s) < 0) atomic_set(&stats->s,INT_MAX);
37723+ atomic_add_unchecked((v),&stats->s); \
37724+ if (atomic_read_unchecked(&stats->s) < 0) atomic_set_unchecked(&stats->s,INT_MAX);
37725
37726
37727 static void suni_hz(unsigned long from_timer)
37728diff --git a/drivers/atm/uPD98402.c b/drivers/atm/uPD98402.c
37729index 5120a96..e2572bd 100644
37730--- a/drivers/atm/uPD98402.c
37731+++ b/drivers/atm/uPD98402.c
37732@@ -42,7 +42,7 @@ static int fetch_stats(struct atm_dev *dev,struct sonet_stats __user *arg,int ze
37733 struct sonet_stats tmp;
37734 int error = 0;
37735
37736- atomic_add(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
37737+ atomic_add_unchecked(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
37738 sonet_copy_stats(&PRIV(dev)->sonet_stats,&tmp);
37739 if (arg) error = copy_to_user(arg,&tmp,sizeof(tmp));
37740 if (zero && !error) {
37741@@ -161,9 +161,9 @@ static int uPD98402_ioctl(struct atm_dev *dev,unsigned int cmd,void __user *arg)
37742
37743
37744 #define ADD_LIMITED(s,v) \
37745- { atomic_add(GET(v),&PRIV(dev)->sonet_stats.s); \
37746- if (atomic_read(&PRIV(dev)->sonet_stats.s) < 0) \
37747- atomic_set(&PRIV(dev)->sonet_stats.s,INT_MAX); }
37748+ { atomic_add_unchecked(GET(v),&PRIV(dev)->sonet_stats.s); \
37749+ if (atomic_read_unchecked(&PRIV(dev)->sonet_stats.s) < 0) \
37750+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.s,INT_MAX); }
37751
37752
37753 static void stat_event(struct atm_dev *dev)
37754@@ -194,7 +194,7 @@ static void uPD98402_int(struct atm_dev *dev)
37755 if (reason & uPD98402_INT_PFM) stat_event(dev);
37756 if (reason & uPD98402_INT_PCO) {
37757 (void) GET(PCOCR); /* clear interrupt cause */
37758- atomic_add(GET(HECCT),
37759+ atomic_add_unchecked(GET(HECCT),
37760 &PRIV(dev)->sonet_stats.uncorr_hcs);
37761 }
37762 if ((reason & uPD98402_INT_RFO) &&
37763@@ -222,9 +222,9 @@ static int uPD98402_start(struct atm_dev *dev)
37764 PUT(~(uPD98402_INT_PFM | uPD98402_INT_ALM | uPD98402_INT_RFO |
37765 uPD98402_INT_LOS),PIMR); /* enable them */
37766 (void) fetch_stats(dev,NULL,1); /* clear kernel counters */
37767- atomic_set(&PRIV(dev)->sonet_stats.corr_hcs,-1);
37768- atomic_set(&PRIV(dev)->sonet_stats.tx_cells,-1);
37769- atomic_set(&PRIV(dev)->sonet_stats.rx_cells,-1);
37770+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.corr_hcs,-1);
37771+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.tx_cells,-1);
37772+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.rx_cells,-1);
37773 return 0;
37774 }
37775
37776diff --git a/drivers/atm/zatm.c b/drivers/atm/zatm.c
37777index cecfb94..87009ec 100644
37778--- a/drivers/atm/zatm.c
37779+++ b/drivers/atm/zatm.c
37780@@ -459,7 +459,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy[0],dummy[1]);
37781 }
37782 if (!size) {
37783 dev_kfree_skb_irq(skb);
37784- if (vcc) atomic_inc(&vcc->stats->rx_err);
37785+ if (vcc) atomic_inc_unchecked(&vcc->stats->rx_err);
37786 continue;
37787 }
37788 if (!atm_charge(vcc,skb->truesize)) {
37789@@ -469,7 +469,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy[0],dummy[1]);
37790 skb->len = size;
37791 ATM_SKB(skb)->vcc = vcc;
37792 vcc->push(vcc,skb);
37793- atomic_inc(&vcc->stats->rx);
37794+ atomic_inc_unchecked(&vcc->stats->rx);
37795 }
37796 zout(pos & 0xffff,MTA(mbx));
37797 #if 0 /* probably a stupid idea */
37798@@ -733,7 +733,7 @@ if (*ZATM_PRV_DSC(skb) != (uPD98401_TXPD_V | uPD98401_TXPD_DP |
37799 skb_queue_head(&zatm_vcc->backlog,skb);
37800 break;
37801 }
37802- atomic_inc(&vcc->stats->tx);
37803+ atomic_inc_unchecked(&vcc->stats->tx);
37804 wake_up(&zatm_vcc->tx_wait);
37805 }
37806
37807diff --git a/drivers/base/bus.c b/drivers/base/bus.c
37808index 79bc203..fa3945b 100644
37809--- a/drivers/base/bus.c
37810+++ b/drivers/base/bus.c
37811@@ -1126,7 +1126,7 @@ int subsys_interface_register(struct subsys_interface *sif)
37812 return -EINVAL;
37813
37814 mutex_lock(&subsys->p->mutex);
37815- list_add_tail(&sif->node, &subsys->p->interfaces);
37816+ pax_list_add_tail((struct list_head *)&sif->node, &subsys->p->interfaces);
37817 if (sif->add_dev) {
37818 subsys_dev_iter_init(&iter, subsys, NULL, NULL);
37819 while ((dev = subsys_dev_iter_next(&iter)))
37820@@ -1151,7 +1151,7 @@ void subsys_interface_unregister(struct subsys_interface *sif)
37821 subsys = sif->subsys;
37822
37823 mutex_lock(&subsys->p->mutex);
37824- list_del_init(&sif->node);
37825+ pax_list_del_init((struct list_head *)&sif->node);
37826 if (sif->remove_dev) {
37827 subsys_dev_iter_init(&iter, subsys, NULL, NULL);
37828 while ((dev = subsys_dev_iter_next(&iter)))
37829diff --git a/drivers/base/devtmpfs.c b/drivers/base/devtmpfs.c
37830index 25798db..15f130e 100644
37831--- a/drivers/base/devtmpfs.c
37832+++ b/drivers/base/devtmpfs.c
37833@@ -354,7 +354,7 @@ int devtmpfs_mount(const char *mntdir)
37834 if (!thread)
37835 return 0;
37836
37837- err = sys_mount("devtmpfs", (char *)mntdir, "devtmpfs", MS_SILENT, NULL);
37838+ err = sys_mount((char __force_user *)"devtmpfs", (char __force_user *)mntdir, (char __force_user *)"devtmpfs", MS_SILENT, NULL);
37839 if (err)
37840 printk(KERN_INFO "devtmpfs: error mounting %i\n", err);
37841 else
37842@@ -380,11 +380,11 @@ static int devtmpfsd(void *p)
37843 *err = sys_unshare(CLONE_NEWNS);
37844 if (*err)
37845 goto out;
37846- *err = sys_mount("devtmpfs", "/", "devtmpfs", MS_SILENT, options);
37847+ *err = sys_mount((char __force_user *)"devtmpfs", (char __force_user *)"/", (char __force_user *)"devtmpfs", MS_SILENT, (char __force_user *)options);
37848 if (*err)
37849 goto out;
37850- sys_chdir("/.."); /* will traverse into overmounted root */
37851- sys_chroot(".");
37852+ sys_chdir((char __force_user *)"/.."); /* will traverse into overmounted root */
37853+ sys_chroot((char __force_user *)".");
37854 complete(&setup_done);
37855 while (1) {
37856 spin_lock(&req_lock);
37857diff --git a/drivers/base/node.c b/drivers/base/node.c
37858index 36fabe43..8cfc112 100644
37859--- a/drivers/base/node.c
37860+++ b/drivers/base/node.c
37861@@ -615,7 +615,7 @@ static ssize_t print_nodes_state(enum node_states state, char *buf)
37862 struct node_attr {
37863 struct device_attribute attr;
37864 enum node_states state;
37865-};
37866+} __do_const;
37867
37868 static ssize_t show_node_state(struct device *dev,
37869 struct device_attribute *attr, char *buf)
37870diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
37871index 45937f8..b9a342e 100644
37872--- a/drivers/base/power/domain.c
37873+++ b/drivers/base/power/domain.c
37874@@ -1698,7 +1698,7 @@ int pm_genpd_attach_cpuidle(struct generic_pm_domain *genpd, int state)
37875 {
37876 struct cpuidle_driver *cpuidle_drv;
37877 struct gpd_cpuidle_data *cpuidle_data;
37878- struct cpuidle_state *idle_state;
37879+ cpuidle_state_no_const *idle_state;
37880 int ret = 0;
37881
37882 if (IS_ERR_OR_NULL(genpd) || state < 0)
37883@@ -1766,7 +1766,7 @@ int pm_genpd_name_attach_cpuidle(const char *name, int state)
37884 int pm_genpd_detach_cpuidle(struct generic_pm_domain *genpd)
37885 {
37886 struct gpd_cpuidle_data *cpuidle_data;
37887- struct cpuidle_state *idle_state;
37888+ cpuidle_state_no_const *idle_state;
37889 int ret = 0;
37890
37891 if (IS_ERR_OR_NULL(genpd))
37892@@ -2195,7 +2195,10 @@ int genpd_dev_pm_attach(struct device *dev)
37893 return ret;
37894 }
37895
37896- dev->pm_domain->detach = genpd_dev_pm_detach;
37897+ pax_open_kernel();
37898+ *(void **)&dev->pm_domain->detach = genpd_dev_pm_detach;
37899+ pax_close_kernel();
37900+
37901 pm_genpd_poweron(pd);
37902
37903 return 0;
37904diff --git a/drivers/base/power/sysfs.c b/drivers/base/power/sysfs.c
37905index d2be3f9..0a3167a 100644
37906--- a/drivers/base/power/sysfs.c
37907+++ b/drivers/base/power/sysfs.c
37908@@ -181,7 +181,7 @@ static ssize_t rtpm_status_show(struct device *dev,
37909 return -EIO;
37910 }
37911 }
37912- return sprintf(buf, p);
37913+ return sprintf(buf, "%s", p);
37914 }
37915
37916 static DEVICE_ATTR(runtime_status, 0444, rtpm_status_show, NULL);
37917diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c
37918index aab7158..b172db2 100644
37919--- a/drivers/base/power/wakeup.c
37920+++ b/drivers/base/power/wakeup.c
37921@@ -32,14 +32,14 @@ static bool pm_abort_suspend __read_mostly;
37922 * They need to be modified together atomically, so it's better to use one
37923 * atomic variable to hold them both.
37924 */
37925-static atomic_t combined_event_count = ATOMIC_INIT(0);
37926+static atomic_unchecked_t combined_event_count = ATOMIC_INIT(0);
37927
37928 #define IN_PROGRESS_BITS (sizeof(int) * 4)
37929 #define MAX_IN_PROGRESS ((1 << IN_PROGRESS_BITS) - 1)
37930
37931 static void split_counters(unsigned int *cnt, unsigned int *inpr)
37932 {
37933- unsigned int comb = atomic_read(&combined_event_count);
37934+ unsigned int comb = atomic_read_unchecked(&combined_event_count);
37935
37936 *cnt = (comb >> IN_PROGRESS_BITS);
37937 *inpr = comb & MAX_IN_PROGRESS;
37938@@ -404,7 +404,7 @@ static void wakeup_source_activate(struct wakeup_source *ws)
37939 ws->start_prevent_time = ws->last_time;
37940
37941 /* Increment the counter of events in progress. */
37942- cec = atomic_inc_return(&combined_event_count);
37943+ cec = atomic_inc_return_unchecked(&combined_event_count);
37944
37945 trace_wakeup_source_activate(ws->name, cec);
37946 }
37947@@ -530,7 +530,7 @@ static void wakeup_source_deactivate(struct wakeup_source *ws)
37948 * Increment the counter of registered wakeup events and decrement the
37949 * couter of wakeup events in progress simultaneously.
37950 */
37951- cec = atomic_add_return(MAX_IN_PROGRESS, &combined_event_count);
37952+ cec = atomic_add_return_unchecked(MAX_IN_PROGRESS, &combined_event_count);
37953 trace_wakeup_source_deactivate(ws->name, cec);
37954
37955 split_counters(&cnt, &inpr);
37956diff --git a/drivers/base/syscore.c b/drivers/base/syscore.c
37957index 8d98a32..61d3165 100644
37958--- a/drivers/base/syscore.c
37959+++ b/drivers/base/syscore.c
37960@@ -22,7 +22,7 @@ static DEFINE_MUTEX(syscore_ops_lock);
37961 void register_syscore_ops(struct syscore_ops *ops)
37962 {
37963 mutex_lock(&syscore_ops_lock);
37964- list_add_tail(&ops->node, &syscore_ops_list);
37965+ pax_list_add_tail((struct list_head *)&ops->node, &syscore_ops_list);
37966 mutex_unlock(&syscore_ops_lock);
37967 }
37968 EXPORT_SYMBOL_GPL(register_syscore_ops);
37969@@ -34,7 +34,7 @@ EXPORT_SYMBOL_GPL(register_syscore_ops);
37970 void unregister_syscore_ops(struct syscore_ops *ops)
37971 {
37972 mutex_lock(&syscore_ops_lock);
37973- list_del(&ops->node);
37974+ pax_list_del((struct list_head *)&ops->node);
37975 mutex_unlock(&syscore_ops_lock);
37976 }
37977 EXPORT_SYMBOL_GPL(unregister_syscore_ops);
37978diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
37979index ff20f19..018f1da 100644
37980--- a/drivers/block/cciss.c
37981+++ b/drivers/block/cciss.c
37982@@ -3008,7 +3008,7 @@ static void start_io(ctlr_info_t *h)
37983 while (!list_empty(&h->reqQ)) {
37984 c = list_entry(h->reqQ.next, CommandList_struct, list);
37985 /* can't do anything if fifo is full */
37986- if ((h->access.fifo_full(h))) {
37987+ if ((h->access->fifo_full(h))) {
37988 dev_warn(&h->pdev->dev, "fifo full\n");
37989 break;
37990 }
37991@@ -3018,7 +3018,7 @@ static void start_io(ctlr_info_t *h)
37992 h->Qdepth--;
37993
37994 /* Tell the controller execute command */
37995- h->access.submit_command(h, c);
37996+ h->access->submit_command(h, c);
37997
37998 /* Put job onto the completed Q */
37999 addQ(&h->cmpQ, c);
38000@@ -3444,17 +3444,17 @@ startio:
38001
38002 static inline unsigned long get_next_completion(ctlr_info_t *h)
38003 {
38004- return h->access.command_completed(h);
38005+ return h->access->command_completed(h);
38006 }
38007
38008 static inline int interrupt_pending(ctlr_info_t *h)
38009 {
38010- return h->access.intr_pending(h);
38011+ return h->access->intr_pending(h);
38012 }
38013
38014 static inline long interrupt_not_for_us(ctlr_info_t *h)
38015 {
38016- return ((h->access.intr_pending(h) == 0) ||
38017+ return ((h->access->intr_pending(h) == 0) ||
38018 (h->interrupts_enabled == 0));
38019 }
38020
38021@@ -3487,7 +3487,7 @@ static inline u32 next_command(ctlr_info_t *h)
38022 u32 a;
38023
38024 if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
38025- return h->access.command_completed(h);
38026+ return h->access->command_completed(h);
38027
38028 if ((*(h->reply_pool_head) & 1) == (h->reply_pool_wraparound)) {
38029 a = *(h->reply_pool_head); /* Next cmd in ring buffer */
38030@@ -4044,7 +4044,7 @@ static void cciss_put_controller_into_performant_mode(ctlr_info_t *h)
38031 trans_support & CFGTBL_Trans_use_short_tags);
38032
38033 /* Change the access methods to the performant access methods */
38034- h->access = SA5_performant_access;
38035+ h->access = &SA5_performant_access;
38036 h->transMethod = CFGTBL_Trans_Performant;
38037
38038 return;
38039@@ -4318,7 +4318,7 @@ static int cciss_pci_init(ctlr_info_t *h)
38040 if (prod_index < 0)
38041 return -ENODEV;
38042 h->product_name = products[prod_index].product_name;
38043- h->access = *(products[prod_index].access);
38044+ h->access = products[prod_index].access;
38045
38046 if (cciss_board_disabled(h)) {
38047 dev_warn(&h->pdev->dev, "controller appears to be disabled\n");
38048@@ -5050,7 +5050,7 @@ reinit_after_soft_reset:
38049 }
38050
38051 /* make sure the board interrupts are off */
38052- h->access.set_intr_mask(h, CCISS_INTR_OFF);
38053+ h->access->set_intr_mask(h, CCISS_INTR_OFF);
38054 rc = cciss_request_irq(h, do_cciss_msix_intr, do_cciss_intx);
38055 if (rc)
38056 goto clean2;
38057@@ -5100,7 +5100,7 @@ reinit_after_soft_reset:
38058 * fake ones to scoop up any residual completions.
38059 */
38060 spin_lock_irqsave(&h->lock, flags);
38061- h->access.set_intr_mask(h, CCISS_INTR_OFF);
38062+ h->access->set_intr_mask(h, CCISS_INTR_OFF);
38063 spin_unlock_irqrestore(&h->lock, flags);
38064 free_irq(h->intr[h->intr_mode], h);
38065 rc = cciss_request_irq(h, cciss_msix_discard_completions,
38066@@ -5120,9 +5120,9 @@ reinit_after_soft_reset:
38067 dev_info(&h->pdev->dev, "Board READY.\n");
38068 dev_info(&h->pdev->dev,
38069 "Waiting for stale completions to drain.\n");
38070- h->access.set_intr_mask(h, CCISS_INTR_ON);
38071+ h->access->set_intr_mask(h, CCISS_INTR_ON);
38072 msleep(10000);
38073- h->access.set_intr_mask(h, CCISS_INTR_OFF);
38074+ h->access->set_intr_mask(h, CCISS_INTR_OFF);
38075
38076 rc = controller_reset_failed(h->cfgtable);
38077 if (rc)
38078@@ -5145,7 +5145,7 @@ reinit_after_soft_reset:
38079 cciss_scsi_setup(h);
38080
38081 /* Turn the interrupts on so we can service requests */
38082- h->access.set_intr_mask(h, CCISS_INTR_ON);
38083+ h->access->set_intr_mask(h, CCISS_INTR_ON);
38084
38085 /* Get the firmware version */
38086 inq_buff = kzalloc(sizeof(InquiryData_struct), GFP_KERNEL);
38087@@ -5217,7 +5217,7 @@ static void cciss_shutdown(struct pci_dev *pdev)
38088 kfree(flush_buf);
38089 if (return_code != IO_OK)
38090 dev_warn(&h->pdev->dev, "Error flushing cache\n");
38091- h->access.set_intr_mask(h, CCISS_INTR_OFF);
38092+ h->access->set_intr_mask(h, CCISS_INTR_OFF);
38093 free_irq(h->intr[h->intr_mode], h);
38094 }
38095
38096diff --git a/drivers/block/cciss.h b/drivers/block/cciss.h
38097index 7fda30e..2f27946 100644
38098--- a/drivers/block/cciss.h
38099+++ b/drivers/block/cciss.h
38100@@ -101,7 +101,7 @@ struct ctlr_info
38101 /* information about each logical volume */
38102 drive_info_struct *drv[CISS_MAX_LUN];
38103
38104- struct access_method access;
38105+ struct access_method *access;
38106
38107 /* queue and queue Info */
38108 struct list_head reqQ;
38109@@ -402,27 +402,27 @@ static bool SA5_performant_intr_pending(ctlr_info_t *h)
38110 }
38111
38112 static struct access_method SA5_access = {
38113- SA5_submit_command,
38114- SA5_intr_mask,
38115- SA5_fifo_full,
38116- SA5_intr_pending,
38117- SA5_completed,
38118+ .submit_command = SA5_submit_command,
38119+ .set_intr_mask = SA5_intr_mask,
38120+ .fifo_full = SA5_fifo_full,
38121+ .intr_pending = SA5_intr_pending,
38122+ .command_completed = SA5_completed,
38123 };
38124
38125 static struct access_method SA5B_access = {
38126- SA5_submit_command,
38127- SA5B_intr_mask,
38128- SA5_fifo_full,
38129- SA5B_intr_pending,
38130- SA5_completed,
38131+ .submit_command = SA5_submit_command,
38132+ .set_intr_mask = SA5B_intr_mask,
38133+ .fifo_full = SA5_fifo_full,
38134+ .intr_pending = SA5B_intr_pending,
38135+ .command_completed = SA5_completed,
38136 };
38137
38138 static struct access_method SA5_performant_access = {
38139- SA5_submit_command,
38140- SA5_performant_intr_mask,
38141- SA5_fifo_full,
38142- SA5_performant_intr_pending,
38143- SA5_performant_completed,
38144+ .submit_command = SA5_submit_command,
38145+ .set_intr_mask = SA5_performant_intr_mask,
38146+ .fifo_full = SA5_fifo_full,
38147+ .intr_pending = SA5_performant_intr_pending,
38148+ .command_completed = SA5_performant_completed,
38149 };
38150
38151 struct board_type {
38152diff --git a/drivers/block/cpqarray.c b/drivers/block/cpqarray.c
38153index 2b94403..fd6ad1f 100644
38154--- a/drivers/block/cpqarray.c
38155+++ b/drivers/block/cpqarray.c
38156@@ -404,7 +404,7 @@ static int cpqarray_register_ctlr(int i, struct pci_dev *pdev)
38157 if (register_blkdev(COMPAQ_SMART2_MAJOR+i, hba[i]->devname)) {
38158 goto Enomem4;
38159 }
38160- hba[i]->access.set_intr_mask(hba[i], 0);
38161+ hba[i]->access->set_intr_mask(hba[i], 0);
38162 if (request_irq(hba[i]->intr, do_ida_intr,
38163 IRQF_DISABLED|IRQF_SHARED, hba[i]->devname, hba[i]))
38164 {
38165@@ -459,7 +459,7 @@ static int cpqarray_register_ctlr(int i, struct pci_dev *pdev)
38166 add_timer(&hba[i]->timer);
38167
38168 /* Enable IRQ now that spinlock and rate limit timer are set up */
38169- hba[i]->access.set_intr_mask(hba[i], FIFO_NOT_EMPTY);
38170+ hba[i]->access->set_intr_mask(hba[i], FIFO_NOT_EMPTY);
38171
38172 for(j=0; j<NWD; j++) {
38173 struct gendisk *disk = ida_gendisk[i][j];
38174@@ -694,7 +694,7 @@ DBGINFO(
38175 for(i=0; i<NR_PRODUCTS; i++) {
38176 if (board_id == products[i].board_id) {
38177 c->product_name = products[i].product_name;
38178- c->access = *(products[i].access);
38179+ c->access = products[i].access;
38180 break;
38181 }
38182 }
38183@@ -792,7 +792,7 @@ static int cpqarray_eisa_detect(void)
38184 hba[ctlr]->intr = intr;
38185 sprintf(hba[ctlr]->devname, "ida%d", nr_ctlr);
38186 hba[ctlr]->product_name = products[j].product_name;
38187- hba[ctlr]->access = *(products[j].access);
38188+ hba[ctlr]->access = products[j].access;
38189 hba[ctlr]->ctlr = ctlr;
38190 hba[ctlr]->board_id = board_id;
38191 hba[ctlr]->pci_dev = NULL; /* not PCI */
38192@@ -978,7 +978,7 @@ static void start_io(ctlr_info_t *h)
38193
38194 while((c = h->reqQ) != NULL) {
38195 /* Can't do anything if we're busy */
38196- if (h->access.fifo_full(h) == 0)
38197+ if (h->access->fifo_full(h) == 0)
38198 return;
38199
38200 /* Get the first entry from the request Q */
38201@@ -986,7 +986,7 @@ static void start_io(ctlr_info_t *h)
38202 h->Qdepth--;
38203
38204 /* Tell the controller to do our bidding */
38205- h->access.submit_command(h, c);
38206+ h->access->submit_command(h, c);
38207
38208 /* Get onto the completion Q */
38209 addQ(&h->cmpQ, c);
38210@@ -1048,7 +1048,7 @@ static irqreturn_t do_ida_intr(int irq, void *dev_id)
38211 unsigned long flags;
38212 __u32 a,a1;
38213
38214- istat = h->access.intr_pending(h);
38215+ istat = h->access->intr_pending(h);
38216 /* Is this interrupt for us? */
38217 if (istat == 0)
38218 return IRQ_NONE;
38219@@ -1059,7 +1059,7 @@ static irqreturn_t do_ida_intr(int irq, void *dev_id)
38220 */
38221 spin_lock_irqsave(IDA_LOCK(h->ctlr), flags);
38222 if (istat & FIFO_NOT_EMPTY) {
38223- while((a = h->access.command_completed(h))) {
38224+ while((a = h->access->command_completed(h))) {
38225 a1 = a; a &= ~3;
38226 if ((c = h->cmpQ) == NULL)
38227 {
38228@@ -1448,11 +1448,11 @@ static int sendcmd(
38229 /*
38230 * Disable interrupt
38231 */
38232- info_p->access.set_intr_mask(info_p, 0);
38233+ info_p->access->set_intr_mask(info_p, 0);
38234 /* Make sure there is room in the command FIFO */
38235 /* Actually it should be completely empty at this time. */
38236 for (i = 200000; i > 0; i--) {
38237- temp = info_p->access.fifo_full(info_p);
38238+ temp = info_p->access->fifo_full(info_p);
38239 if (temp != 0) {
38240 break;
38241 }
38242@@ -1465,7 +1465,7 @@ DBG(
38243 /*
38244 * Send the cmd
38245 */
38246- info_p->access.submit_command(info_p, c);
38247+ info_p->access->submit_command(info_p, c);
38248 complete = pollcomplete(ctlr);
38249
38250 pci_unmap_single(info_p->pci_dev, (dma_addr_t) c->req.sg[0].addr,
38251@@ -1548,9 +1548,9 @@ static int revalidate_allvol(ctlr_info_t *host)
38252 * we check the new geometry. Then turn interrupts back on when
38253 * we're done.
38254 */
38255- host->access.set_intr_mask(host, 0);
38256+ host->access->set_intr_mask(host, 0);
38257 getgeometry(ctlr);
38258- host->access.set_intr_mask(host, FIFO_NOT_EMPTY);
38259+ host->access->set_intr_mask(host, FIFO_NOT_EMPTY);
38260
38261 for(i=0; i<NWD; i++) {
38262 struct gendisk *disk = ida_gendisk[ctlr][i];
38263@@ -1590,7 +1590,7 @@ static int pollcomplete(int ctlr)
38264 /* Wait (up to 2 seconds) for a command to complete */
38265
38266 for (i = 200000; i > 0; i--) {
38267- done = hba[ctlr]->access.command_completed(hba[ctlr]);
38268+ done = hba[ctlr]->access->command_completed(hba[ctlr]);
38269 if (done == 0) {
38270 udelay(10); /* a short fixed delay */
38271 } else
38272diff --git a/drivers/block/cpqarray.h b/drivers/block/cpqarray.h
38273index be73e9d..7fbf140 100644
38274--- a/drivers/block/cpqarray.h
38275+++ b/drivers/block/cpqarray.h
38276@@ -99,7 +99,7 @@ struct ctlr_info {
38277 drv_info_t drv[NWD];
38278 struct proc_dir_entry *proc;
38279
38280- struct access_method access;
38281+ struct access_method *access;
38282
38283 cmdlist_t *reqQ;
38284 cmdlist_t *cmpQ;
38285diff --git a/drivers/block/drbd/drbd_bitmap.c b/drivers/block/drbd/drbd_bitmap.c
38286index 434c77d..6d3219a 100644
38287--- a/drivers/block/drbd/drbd_bitmap.c
38288+++ b/drivers/block/drbd/drbd_bitmap.c
38289@@ -1036,7 +1036,7 @@ static void bm_page_io_async(struct drbd_bm_aio_ctx *ctx, int page_nr) __must_ho
38290 submit_bio(rw, bio);
38291 /* this should not count as user activity and cause the
38292 * resync to throttle -- see drbd_rs_should_slow_down(). */
38293- atomic_add(len >> 9, &device->rs_sect_ev);
38294+ atomic_add_unchecked(len >> 9, &device->rs_sect_ev);
38295 }
38296 }
38297
38298diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h
38299index b905e98..0812ed8 100644
38300--- a/drivers/block/drbd/drbd_int.h
38301+++ b/drivers/block/drbd/drbd_int.h
38302@@ -385,7 +385,7 @@ struct drbd_epoch {
38303 struct drbd_connection *connection;
38304 struct list_head list;
38305 unsigned int barrier_nr;
38306- atomic_t epoch_size; /* increased on every request added. */
38307+ atomic_unchecked_t epoch_size; /* increased on every request added. */
38308 atomic_t active; /* increased on every req. added, and dec on every finished. */
38309 unsigned long flags;
38310 };
38311@@ -946,7 +946,7 @@ struct drbd_device {
38312 unsigned int al_tr_number;
38313 int al_tr_cycle;
38314 wait_queue_head_t seq_wait;
38315- atomic_t packet_seq;
38316+ atomic_unchecked_t packet_seq;
38317 unsigned int peer_seq;
38318 spinlock_t peer_seq_lock;
38319 unsigned long comm_bm_set; /* communicated number of set bits. */
38320@@ -955,8 +955,8 @@ struct drbd_device {
38321 struct mutex own_state_mutex;
38322 struct mutex *state_mutex; /* either own_state_mutex or first_peer_device(device)->connection->cstate_mutex */
38323 char congestion_reason; /* Why we where congested... */
38324- atomic_t rs_sect_in; /* for incoming resync data rate, SyncTarget */
38325- atomic_t rs_sect_ev; /* for submitted resync data rate, both */
38326+ atomic_unchecked_t rs_sect_in; /* for incoming resync data rate, SyncTarget */
38327+ atomic_unchecked_t rs_sect_ev; /* for submitted resync data rate, both */
38328 int rs_last_sect_ev; /* counter to compare with */
38329 int rs_last_events; /* counter of read or write "events" (unit sectors)
38330 * on the lower level device when we last looked. */
38331diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
38332index 1fc8342..7e7742b 100644
38333--- a/drivers/block/drbd/drbd_main.c
38334+++ b/drivers/block/drbd/drbd_main.c
38335@@ -1328,7 +1328,7 @@ static int _drbd_send_ack(struct drbd_peer_device *peer_device, enum drbd_packet
38336 p->sector = sector;
38337 p->block_id = block_id;
38338 p->blksize = blksize;
38339- p->seq_num = cpu_to_be32(atomic_inc_return(&peer_device->device->packet_seq));
38340+ p->seq_num = cpu_to_be32(atomic_inc_return_unchecked(&peer_device->device->packet_seq));
38341 return drbd_send_command(peer_device, sock, cmd, sizeof(*p), NULL, 0);
38342 }
38343
38344@@ -1634,7 +1634,7 @@ int drbd_send_dblock(struct drbd_peer_device *peer_device, struct drbd_request *
38345 return -EIO;
38346 p->sector = cpu_to_be64(req->i.sector);
38347 p->block_id = (unsigned long)req;
38348- p->seq_num = cpu_to_be32(atomic_inc_return(&device->packet_seq));
38349+ p->seq_num = cpu_to_be32(atomic_inc_return_unchecked(&device->packet_seq));
38350 dp_flags = bio_flags_to_wire(peer_device->connection, req->master_bio->bi_rw);
38351 if (device->state.conn >= C_SYNC_SOURCE &&
38352 device->state.conn <= C_PAUSED_SYNC_T)
38353@@ -1915,8 +1915,8 @@ void drbd_init_set_defaults(struct drbd_device *device)
38354 atomic_set(&device->unacked_cnt, 0);
38355 atomic_set(&device->local_cnt, 0);
38356 atomic_set(&device->pp_in_use_by_net, 0);
38357- atomic_set(&device->rs_sect_in, 0);
38358- atomic_set(&device->rs_sect_ev, 0);
38359+ atomic_set_unchecked(&device->rs_sect_in, 0);
38360+ atomic_set_unchecked(&device->rs_sect_ev, 0);
38361 atomic_set(&device->ap_in_flight, 0);
38362 atomic_set(&device->md_io.in_use, 0);
38363
38364@@ -2684,8 +2684,8 @@ void drbd_destroy_connection(struct kref *kref)
38365 struct drbd_connection *connection = container_of(kref, struct drbd_connection, kref);
38366 struct drbd_resource *resource = connection->resource;
38367
38368- if (atomic_read(&connection->current_epoch->epoch_size) != 0)
38369- drbd_err(connection, "epoch_size:%d\n", atomic_read(&connection->current_epoch->epoch_size));
38370+ if (atomic_read_unchecked(&connection->current_epoch->epoch_size) != 0)
38371+ drbd_err(connection, "epoch_size:%d\n", atomic_read_unchecked(&connection->current_epoch->epoch_size));
38372 kfree(connection->current_epoch);
38373
38374 idr_destroy(&connection->peer_devices);
38375diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c
38376index 74df8cf..e41fc24 100644
38377--- a/drivers/block/drbd/drbd_nl.c
38378+++ b/drivers/block/drbd/drbd_nl.c
38379@@ -3637,13 +3637,13 @@ finish:
38380
38381 void drbd_bcast_event(struct drbd_device *device, const struct sib_info *sib)
38382 {
38383- static atomic_t drbd_genl_seq = ATOMIC_INIT(2); /* two. */
38384+ static atomic_unchecked_t drbd_genl_seq = ATOMIC_INIT(2); /* two. */
38385 struct sk_buff *msg;
38386 struct drbd_genlmsghdr *d_out;
38387 unsigned seq;
38388 int err = -ENOMEM;
38389
38390- seq = atomic_inc_return(&drbd_genl_seq);
38391+ seq = atomic_inc_return_unchecked(&drbd_genl_seq);
38392 msg = genlmsg_new(NLMSG_GOODSIZE, GFP_NOIO);
38393 if (!msg)
38394 goto failed;
38395diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
38396index cee2035..22f66bd 100644
38397--- a/drivers/block/drbd/drbd_receiver.c
38398+++ b/drivers/block/drbd/drbd_receiver.c
38399@@ -870,7 +870,7 @@ int drbd_connected(struct drbd_peer_device *peer_device)
38400 struct drbd_device *device = peer_device->device;
38401 int err;
38402
38403- atomic_set(&device->packet_seq, 0);
38404+ atomic_set_unchecked(&device->packet_seq, 0);
38405 device->peer_seq = 0;
38406
38407 device->state_mutex = peer_device->connection->agreed_pro_version < 100 ?
38408@@ -1233,7 +1233,7 @@ static enum finish_epoch drbd_may_finish_epoch(struct drbd_connection *connectio
38409 do {
38410 next_epoch = NULL;
38411
38412- epoch_size = atomic_read(&epoch->epoch_size);
38413+ epoch_size = atomic_read_unchecked(&epoch->epoch_size);
38414
38415 switch (ev & ~EV_CLEANUP) {
38416 case EV_PUT:
38417@@ -1273,7 +1273,7 @@ static enum finish_epoch drbd_may_finish_epoch(struct drbd_connection *connectio
38418 rv = FE_DESTROYED;
38419 } else {
38420 epoch->flags = 0;
38421- atomic_set(&epoch->epoch_size, 0);
38422+ atomic_set_unchecked(&epoch->epoch_size, 0);
38423 /* atomic_set(&epoch->active, 0); is already zero */
38424 if (rv == FE_STILL_LIVE)
38425 rv = FE_RECYCLED;
38426@@ -1550,7 +1550,7 @@ static int receive_Barrier(struct drbd_connection *connection, struct packet_inf
38427 conn_wait_active_ee_empty(connection);
38428 drbd_flush(connection);
38429
38430- if (atomic_read(&connection->current_epoch->epoch_size)) {
38431+ if (atomic_read_unchecked(&connection->current_epoch->epoch_size)) {
38432 epoch = kmalloc(sizeof(struct drbd_epoch), GFP_NOIO);
38433 if (epoch)
38434 break;
38435@@ -1564,11 +1564,11 @@ static int receive_Barrier(struct drbd_connection *connection, struct packet_inf
38436 }
38437
38438 epoch->flags = 0;
38439- atomic_set(&epoch->epoch_size, 0);
38440+ atomic_set_unchecked(&epoch->epoch_size, 0);
38441 atomic_set(&epoch->active, 0);
38442
38443 spin_lock(&connection->epoch_lock);
38444- if (atomic_read(&connection->current_epoch->epoch_size)) {
38445+ if (atomic_read_unchecked(&connection->current_epoch->epoch_size)) {
38446 list_add(&epoch->list, &connection->current_epoch->list);
38447 connection->current_epoch = epoch;
38448 connection->epochs++;
38449@@ -1802,7 +1802,7 @@ static int recv_resync_read(struct drbd_peer_device *peer_device, sector_t secto
38450 list_add_tail(&peer_req->w.list, &device->sync_ee);
38451 spin_unlock_irq(&device->resource->req_lock);
38452
38453- atomic_add(pi->size >> 9, &device->rs_sect_ev);
38454+ atomic_add_unchecked(pi->size >> 9, &device->rs_sect_ev);
38455 if (drbd_submit_peer_request(device, peer_req, WRITE, DRBD_FAULT_RS_WR) == 0)
38456 return 0;
38457
38458@@ -1900,7 +1900,7 @@ static int receive_RSDataReply(struct drbd_connection *connection, struct packet
38459 drbd_send_ack_dp(peer_device, P_NEG_ACK, p, pi->size);
38460 }
38461
38462- atomic_add(pi->size >> 9, &device->rs_sect_in);
38463+ atomic_add_unchecked(pi->size >> 9, &device->rs_sect_in);
38464
38465 return err;
38466 }
38467@@ -2290,7 +2290,7 @@ static int receive_Data(struct drbd_connection *connection, struct packet_info *
38468
38469 err = wait_for_and_update_peer_seq(peer_device, peer_seq);
38470 drbd_send_ack_dp(peer_device, P_NEG_ACK, p, pi->size);
38471- atomic_inc(&connection->current_epoch->epoch_size);
38472+ atomic_inc_unchecked(&connection->current_epoch->epoch_size);
38473 err2 = drbd_drain_block(peer_device, pi->size);
38474 if (!err)
38475 err = err2;
38476@@ -2334,7 +2334,7 @@ static int receive_Data(struct drbd_connection *connection, struct packet_info *
38477
38478 spin_lock(&connection->epoch_lock);
38479 peer_req->epoch = connection->current_epoch;
38480- atomic_inc(&peer_req->epoch->epoch_size);
38481+ atomic_inc_unchecked(&peer_req->epoch->epoch_size);
38482 atomic_inc(&peer_req->epoch->active);
38483 spin_unlock(&connection->epoch_lock);
38484
38485@@ -2479,7 +2479,7 @@ bool drbd_rs_c_min_rate_throttle(struct drbd_device *device)
38486
38487 curr_events = (int)part_stat_read(&disk->part0, sectors[0]) +
38488 (int)part_stat_read(&disk->part0, sectors[1]) -
38489- atomic_read(&device->rs_sect_ev);
38490+ atomic_read_unchecked(&device->rs_sect_ev);
38491
38492 if (atomic_read(&device->ap_actlog_cnt)
38493 || curr_events - device->rs_last_events > 64) {
38494@@ -2618,7 +2618,7 @@ static int receive_DataRequest(struct drbd_connection *connection, struct packet
38495 device->use_csums = true;
38496 } else if (pi->cmd == P_OV_REPLY) {
38497 /* track progress, we may need to throttle */
38498- atomic_add(size >> 9, &device->rs_sect_in);
38499+ atomic_add_unchecked(size >> 9, &device->rs_sect_in);
38500 peer_req->w.cb = w_e_end_ov_reply;
38501 dec_rs_pending(device);
38502 /* drbd_rs_begin_io done when we sent this request,
38503@@ -2691,7 +2691,7 @@ static int receive_DataRequest(struct drbd_connection *connection, struct packet
38504 goto out_free_e;
38505
38506 submit_for_resync:
38507- atomic_add(size >> 9, &device->rs_sect_ev);
38508+ atomic_add_unchecked(size >> 9, &device->rs_sect_ev);
38509
38510 submit:
38511 update_receiver_timing_details(connection, drbd_submit_peer_request);
38512@@ -4564,7 +4564,7 @@ struct data_cmd {
38513 int expect_payload;
38514 size_t pkt_size;
38515 int (*fn)(struct drbd_connection *, struct packet_info *);
38516-};
38517+} __do_const;
38518
38519 static struct data_cmd drbd_cmd_handler[] = {
38520 [P_DATA] = { 1, sizeof(struct p_data), receive_Data },
38521@@ -4678,7 +4678,7 @@ static void conn_disconnect(struct drbd_connection *connection)
38522 if (!list_empty(&connection->current_epoch->list))
38523 drbd_err(connection, "ASSERTION FAILED: connection->current_epoch->list not empty\n");
38524 /* ok, no more ee's on the fly, it is safe to reset the epoch_size */
38525- atomic_set(&connection->current_epoch->epoch_size, 0);
38526+ atomic_set_unchecked(&connection->current_epoch->epoch_size, 0);
38527 connection->send.seen_any_write_yet = false;
38528
38529 drbd_info(connection, "Connection closed\n");
38530@@ -5182,7 +5182,7 @@ static int got_IsInSync(struct drbd_connection *connection, struct packet_info *
38531 put_ldev(device);
38532 }
38533 dec_rs_pending(device);
38534- atomic_add(blksize >> 9, &device->rs_sect_in);
38535+ atomic_add_unchecked(blksize >> 9, &device->rs_sect_in);
38536
38537 return 0;
38538 }
38539@@ -5470,7 +5470,7 @@ static int connection_finish_peer_reqs(struct drbd_connection *connection)
38540 struct asender_cmd {
38541 size_t pkt_size;
38542 int (*fn)(struct drbd_connection *connection, struct packet_info *);
38543-};
38544+} __do_const;
38545
38546 static struct asender_cmd asender_tbl[] = {
38547 [P_PING] = { 0, got_Ping },
38548diff --git a/drivers/block/drbd/drbd_worker.c b/drivers/block/drbd/drbd_worker.c
38549index d0fae55..4469096 100644
38550--- a/drivers/block/drbd/drbd_worker.c
38551+++ b/drivers/block/drbd/drbd_worker.c
38552@@ -408,7 +408,7 @@ static int read_for_csum(struct drbd_peer_device *peer_device, sector_t sector,
38553 list_add_tail(&peer_req->w.list, &device->read_ee);
38554 spin_unlock_irq(&device->resource->req_lock);
38555
38556- atomic_add(size >> 9, &device->rs_sect_ev);
38557+ atomic_add_unchecked(size >> 9, &device->rs_sect_ev);
38558 if (drbd_submit_peer_request(device, peer_req, READ, DRBD_FAULT_RS_RD) == 0)
38559 return 0;
38560
38561@@ -553,7 +553,7 @@ static int drbd_rs_number_requests(struct drbd_device *device)
38562 unsigned int sect_in; /* Number of sectors that came in since the last turn */
38563 int number, mxb;
38564
38565- sect_in = atomic_xchg(&device->rs_sect_in, 0);
38566+ sect_in = atomic_xchg_unchecked(&device->rs_sect_in, 0);
38567 device->rs_in_flight -= sect_in;
38568
38569 rcu_read_lock();
38570@@ -1595,8 +1595,8 @@ void drbd_rs_controller_reset(struct drbd_device *device)
38571 struct gendisk *disk = device->ldev->backing_bdev->bd_contains->bd_disk;
38572 struct fifo_buffer *plan;
38573
38574- atomic_set(&device->rs_sect_in, 0);
38575- atomic_set(&device->rs_sect_ev, 0);
38576+ atomic_set_unchecked(&device->rs_sect_in, 0);
38577+ atomic_set_unchecked(&device->rs_sect_ev, 0);
38578 device->rs_in_flight = 0;
38579 device->rs_last_events =
38580 (int)part_stat_read(&disk->part0, sectors[0]) +
38581diff --git a/drivers/block/loop.c b/drivers/block/loop.c
38582index 773e964..e85af00 100644
38583--- a/drivers/block/loop.c
38584+++ b/drivers/block/loop.c
38585@@ -234,7 +234,7 @@ static int __do_lo_send_write(struct file *file,
38586
38587 file_start_write(file);
38588 set_fs(get_ds());
38589- bw = file->f_op->write(file, buf, len, &pos);
38590+ bw = file->f_op->write(file, (const char __force_user *)buf, len, &pos);
38591 set_fs(old_fs);
38592 file_end_write(file);
38593 if (likely(bw == len))
38594diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
38595index 09e628da..7607aaa 100644
38596--- a/drivers/block/pktcdvd.c
38597+++ b/drivers/block/pktcdvd.c
38598@@ -108,7 +108,7 @@ static int pkt_seq_show(struct seq_file *m, void *p);
38599
38600 static sector_t get_zone(sector_t sector, struct pktcdvd_device *pd)
38601 {
38602- return (sector + pd->offset) & ~(sector_t)(pd->settings.size - 1);
38603+ return (sector + pd->offset) & ~(sector_t)(pd->settings.size - 1UL);
38604 }
38605
38606 /*
38607@@ -1890,7 +1890,7 @@ static noinline_for_stack int pkt_probe_settings(struct pktcdvd_device *pd)
38608 return -EROFS;
38609 }
38610 pd->settings.fp = ti.fp;
38611- pd->offset = (be32_to_cpu(ti.track_start) << 2) & (pd->settings.size - 1);
38612+ pd->offset = (be32_to_cpu(ti.track_start) << 2) & (pd->settings.size - 1UL);
38613
38614 if (ti.nwa_v) {
38615 pd->nwa = be32_to_cpu(ti.next_writable);
38616diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
38617index b67066d..515b7f4 100644
38618--- a/drivers/block/rbd.c
38619+++ b/drivers/block/rbd.c
38620@@ -64,7 +64,7 @@
38621 * If the counter is already at its maximum value returns
38622 * -EINVAL without updating it.
38623 */
38624-static int atomic_inc_return_safe(atomic_t *v)
38625+static int __intentional_overflow(-1) atomic_inc_return_safe(atomic_t *v)
38626 {
38627 unsigned int counter;
38628
38629diff --git a/drivers/block/smart1,2.h b/drivers/block/smart1,2.h
38630index e5565fb..71be10b4 100644
38631--- a/drivers/block/smart1,2.h
38632+++ b/drivers/block/smart1,2.h
38633@@ -108,11 +108,11 @@ static unsigned long smart4_intr_pending(ctlr_info_t *h)
38634 }
38635
38636 static struct access_method smart4_access = {
38637- smart4_submit_command,
38638- smart4_intr_mask,
38639- smart4_fifo_full,
38640- smart4_intr_pending,
38641- smart4_completed,
38642+ .submit_command = smart4_submit_command,
38643+ .set_intr_mask = smart4_intr_mask,
38644+ .fifo_full = smart4_fifo_full,
38645+ .intr_pending = smart4_intr_pending,
38646+ .command_completed = smart4_completed,
38647 };
38648
38649 /*
38650@@ -144,11 +144,11 @@ static unsigned long smart2_intr_pending(ctlr_info_t *h)
38651 }
38652
38653 static struct access_method smart2_access = {
38654- smart2_submit_command,
38655- smart2_intr_mask,
38656- smart2_fifo_full,
38657- smart2_intr_pending,
38658- smart2_completed,
38659+ .submit_command = smart2_submit_command,
38660+ .set_intr_mask = smart2_intr_mask,
38661+ .fifo_full = smart2_fifo_full,
38662+ .intr_pending = smart2_intr_pending,
38663+ .command_completed = smart2_completed,
38664 };
38665
38666 /*
38667@@ -180,11 +180,11 @@ static unsigned long smart2e_intr_pending(ctlr_info_t *h)
38668 }
38669
38670 static struct access_method smart2e_access = {
38671- smart2e_submit_command,
38672- smart2e_intr_mask,
38673- smart2e_fifo_full,
38674- smart2e_intr_pending,
38675- smart2e_completed,
38676+ .submit_command = smart2e_submit_command,
38677+ .set_intr_mask = smart2e_intr_mask,
38678+ .fifo_full = smart2e_fifo_full,
38679+ .intr_pending = smart2e_intr_pending,
38680+ .command_completed = smart2e_completed,
38681 };
38682
38683 /*
38684@@ -270,9 +270,9 @@ static unsigned long smart1_intr_pending(ctlr_info_t *h)
38685 }
38686
38687 static struct access_method smart1_access = {
38688- smart1_submit_command,
38689- smart1_intr_mask,
38690- smart1_fifo_full,
38691- smart1_intr_pending,
38692- smart1_completed,
38693+ .submit_command = smart1_submit_command,
38694+ .set_intr_mask = smart1_intr_mask,
38695+ .fifo_full = smart1_fifo_full,
38696+ .intr_pending = smart1_intr_pending,
38697+ .command_completed = smart1_completed,
38698 };
38699diff --git a/drivers/bluetooth/btwilink.c b/drivers/bluetooth/btwilink.c
38700index 55c135b..9f8d60c 100644
38701--- a/drivers/bluetooth/btwilink.c
38702+++ b/drivers/bluetooth/btwilink.c
38703@@ -288,7 +288,7 @@ static int ti_st_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
38704
38705 static int bt_ti_probe(struct platform_device *pdev)
38706 {
38707- static struct ti_st *hst;
38708+ struct ti_st *hst;
38709 struct hci_dev *hdev;
38710 int err;
38711
38712diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c
38713index 5d28a45..a538f90 100644
38714--- a/drivers/cdrom/cdrom.c
38715+++ b/drivers/cdrom/cdrom.c
38716@@ -610,7 +610,6 @@ int register_cdrom(struct cdrom_device_info *cdi)
38717 ENSURE(reset, CDC_RESET);
38718 ENSURE(generic_packet, CDC_GENERIC_PACKET);
38719 cdi->mc_flags = 0;
38720- cdo->n_minors = 0;
38721 cdi->options = CDO_USE_FFLAGS;
38722
38723 if (autoclose == 1 && CDROM_CAN(CDC_CLOSE_TRAY))
38724@@ -630,8 +629,11 @@ int register_cdrom(struct cdrom_device_info *cdi)
38725 else
38726 cdi->cdda_method = CDDA_OLD;
38727
38728- if (!cdo->generic_packet)
38729- cdo->generic_packet = cdrom_dummy_generic_packet;
38730+ if (!cdo->generic_packet) {
38731+ pax_open_kernel();
38732+ *(void **)&cdo->generic_packet = cdrom_dummy_generic_packet;
38733+ pax_close_kernel();
38734+ }
38735
38736 cd_dbg(CD_REG_UNREG, "drive \"/dev/%s\" registered\n", cdi->name);
38737 mutex_lock(&cdrom_mutex);
38738@@ -652,7 +654,6 @@ void unregister_cdrom(struct cdrom_device_info *cdi)
38739 if (cdi->exit)
38740 cdi->exit(cdi);
38741
38742- cdi->ops->n_minors--;
38743 cd_dbg(CD_REG_UNREG, "drive \"/dev/%s\" unregistered\n", cdi->name);
38744 }
38745
38746@@ -2126,7 +2127,7 @@ static int cdrom_read_cdda_old(struct cdrom_device_info *cdi, __u8 __user *ubuf,
38747 */
38748 nr = nframes;
38749 do {
38750- cgc.buffer = kmalloc(CD_FRAMESIZE_RAW * nr, GFP_KERNEL);
38751+ cgc.buffer = kzalloc(CD_FRAMESIZE_RAW * nr, GFP_KERNEL);
38752 if (cgc.buffer)
38753 break;
38754
38755@@ -3434,7 +3435,7 @@ static int cdrom_print_info(const char *header, int val, char *info,
38756 struct cdrom_device_info *cdi;
38757 int ret;
38758
38759- ret = scnprintf(info + *pos, max_size - *pos, header);
38760+ ret = scnprintf(info + *pos, max_size - *pos, "%s", header);
38761 if (!ret)
38762 return 1;
38763
38764diff --git a/drivers/cdrom/gdrom.c b/drivers/cdrom/gdrom.c
38765index 584bc31..e64a12c 100644
38766--- a/drivers/cdrom/gdrom.c
38767+++ b/drivers/cdrom/gdrom.c
38768@@ -491,7 +491,6 @@ static struct cdrom_device_ops gdrom_ops = {
38769 .audio_ioctl = gdrom_audio_ioctl,
38770 .capability = CDC_MULTI_SESSION | CDC_MEDIA_CHANGED |
38771 CDC_RESET | CDC_DRIVE_STATUS | CDC_CD_R,
38772- .n_minors = 1,
38773 };
38774
38775 static int gdrom_bdops_open(struct block_device *bdev, fmode_t mode)
38776diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
38777index a4af822..ed58cd1 100644
38778--- a/drivers/char/Kconfig
38779+++ b/drivers/char/Kconfig
38780@@ -17,7 +17,8 @@ config DEVMEM
38781
38782 config DEVKMEM
38783 bool "/dev/kmem virtual device support"
38784- default y
38785+ default n
38786+ depends on !GRKERNSEC_KMEM
38787 help
38788 Say Y here if you want to support the /dev/kmem device. The
38789 /dev/kmem device is rarely used, but can be used for certain
38790@@ -586,6 +587,7 @@ config DEVPORT
38791 bool
38792 depends on !M68K
38793 depends on ISA || PCI
38794+ depends on !GRKERNSEC_KMEM
38795 default y
38796
38797 source "drivers/s390/char/Kconfig"
38798diff --git a/drivers/char/agp/compat_ioctl.c b/drivers/char/agp/compat_ioctl.c
38799index a48e05b..6bac831 100644
38800--- a/drivers/char/agp/compat_ioctl.c
38801+++ b/drivers/char/agp/compat_ioctl.c
38802@@ -108,7 +108,7 @@ static int compat_agpioc_reserve_wrap(struct agp_file_private *priv, void __user
38803 return -ENOMEM;
38804 }
38805
38806- if (copy_from_user(usegment, (void __user *) ureserve.seg_list,
38807+ if (copy_from_user(usegment, (void __force_user *) ureserve.seg_list,
38808 sizeof(*usegment) * ureserve.seg_count)) {
38809 kfree(usegment);
38810 kfree(ksegment);
38811diff --git a/drivers/char/agp/frontend.c b/drivers/char/agp/frontend.c
38812index 09f17eb..8531d2f 100644
38813--- a/drivers/char/agp/frontend.c
38814+++ b/drivers/char/agp/frontend.c
38815@@ -806,7 +806,7 @@ static int agpioc_reserve_wrap(struct agp_file_private *priv, void __user *arg)
38816 if (copy_from_user(&reserve, arg, sizeof(struct agp_region)))
38817 return -EFAULT;
38818
38819- if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment))
38820+ if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment_priv))
38821 return -EFAULT;
38822
38823 client = agp_find_client_by_pid(reserve.pid);
38824@@ -836,7 +836,7 @@ static int agpioc_reserve_wrap(struct agp_file_private *priv, void __user *arg)
38825 if (segment == NULL)
38826 return -ENOMEM;
38827
38828- if (copy_from_user(segment, (void __user *) reserve.seg_list,
38829+ if (copy_from_user(segment, (void __force_user *) reserve.seg_list,
38830 sizeof(struct agp_segment) * reserve.seg_count)) {
38831 kfree(segment);
38832 return -EFAULT;
38833diff --git a/drivers/char/genrtc.c b/drivers/char/genrtc.c
38834index 4f94375..413694e 100644
38835--- a/drivers/char/genrtc.c
38836+++ b/drivers/char/genrtc.c
38837@@ -273,6 +273,7 @@ static int gen_rtc_ioctl(struct file *file,
38838 switch (cmd) {
38839
38840 case RTC_PLL_GET:
38841+ memset(&pll, 0, sizeof(pll));
38842 if (get_rtc_pll(&pll))
38843 return -EINVAL;
38844 else
38845diff --git a/drivers/char/hpet.c b/drivers/char/hpet.c
38846index 5c0baa9..44011b1 100644
38847--- a/drivers/char/hpet.c
38848+++ b/drivers/char/hpet.c
38849@@ -575,7 +575,7 @@ static inline unsigned long hpet_time_div(struct hpets *hpets,
38850 }
38851
38852 static int
38853-hpet_ioctl_common(struct hpet_dev *devp, int cmd, unsigned long arg,
38854+hpet_ioctl_common(struct hpet_dev *devp, unsigned int cmd, unsigned long arg,
38855 struct hpet_info *info)
38856 {
38857 struct hpet_timer __iomem *timer;
38858diff --git a/drivers/char/i8k.c b/drivers/char/i8k.c
38859index 24cc4ed..f9807cf 100644
38860--- a/drivers/char/i8k.c
38861+++ b/drivers/char/i8k.c
38862@@ -788,7 +788,7 @@ static const struct i8k_config_data i8k_config_data[] = {
38863 },
38864 };
38865
38866-static struct dmi_system_id i8k_dmi_table[] __initdata = {
38867+static const struct dmi_system_id i8k_dmi_table[] __initconst = {
38868 {
38869 .ident = "Dell Inspiron",
38870 .matches = {
38871diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
38872index 9bb5928..57a7801 100644
38873--- a/drivers/char/ipmi/ipmi_msghandler.c
38874+++ b/drivers/char/ipmi/ipmi_msghandler.c
38875@@ -436,7 +436,7 @@ struct ipmi_smi {
38876 struct proc_dir_entry *proc_dir;
38877 char proc_dir_name[10];
38878
38879- atomic_t stats[IPMI_NUM_STATS];
38880+ atomic_unchecked_t stats[IPMI_NUM_STATS];
38881
38882 /*
38883 * run_to_completion duplicate of smb_info, smi_info
38884@@ -468,9 +468,9 @@ static LIST_HEAD(smi_watchers);
38885 static DEFINE_MUTEX(smi_watchers_mutex);
38886
38887 #define ipmi_inc_stat(intf, stat) \
38888- atomic_inc(&(intf)->stats[IPMI_STAT_ ## stat])
38889+ atomic_inc_unchecked(&(intf)->stats[IPMI_STAT_ ## stat])
38890 #define ipmi_get_stat(intf, stat) \
38891- ((unsigned int) atomic_read(&(intf)->stats[IPMI_STAT_ ## stat]))
38892+ ((unsigned int) atomic_read_unchecked(&(intf)->stats[IPMI_STAT_ ## stat]))
38893
38894 static char *addr_src_to_str[] = { "invalid", "hotmod", "hardcoded", "SPMI",
38895 "ACPI", "SMBIOS", "PCI",
38896@@ -2828,7 +2828,7 @@ int ipmi_register_smi(struct ipmi_smi_handlers *handlers,
38897 INIT_LIST_HEAD(&intf->cmd_rcvrs);
38898 init_waitqueue_head(&intf->waitq);
38899 for (i = 0; i < IPMI_NUM_STATS; i++)
38900- atomic_set(&intf->stats[i], 0);
38901+ atomic_set_unchecked(&intf->stats[i], 0);
38902
38903 intf->proc_dir = NULL;
38904
38905diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
38906index 518585c..6c985cef 100644
38907--- a/drivers/char/ipmi/ipmi_si_intf.c
38908+++ b/drivers/char/ipmi/ipmi_si_intf.c
38909@@ -289,7 +289,7 @@ struct smi_info {
38910 unsigned char slave_addr;
38911
38912 /* Counters and things for the proc filesystem. */
38913- atomic_t stats[SI_NUM_STATS];
38914+ atomic_unchecked_t stats[SI_NUM_STATS];
38915
38916 struct task_struct *thread;
38917
38918@@ -298,9 +298,9 @@ struct smi_info {
38919 };
38920
38921 #define smi_inc_stat(smi, stat) \
38922- atomic_inc(&(smi)->stats[SI_STAT_ ## stat])
38923+ atomic_inc_unchecked(&(smi)->stats[SI_STAT_ ## stat])
38924 #define smi_get_stat(smi, stat) \
38925- ((unsigned int) atomic_read(&(smi)->stats[SI_STAT_ ## stat]))
38926+ ((unsigned int) atomic_read_unchecked(&(smi)->stats[SI_STAT_ ## stat]))
38927
38928 #define SI_MAX_PARMS 4
38929
38930@@ -3498,7 +3498,7 @@ static int try_smi_init(struct smi_info *new_smi)
38931 atomic_set(&new_smi->req_events, 0);
38932 new_smi->run_to_completion = false;
38933 for (i = 0; i < SI_NUM_STATS; i++)
38934- atomic_set(&new_smi->stats[i], 0);
38935+ atomic_set_unchecked(&new_smi->stats[i], 0);
38936
38937 new_smi->interrupt_disabled = true;
38938 atomic_set(&new_smi->need_watch, 0);
38939diff --git a/drivers/char/mem.c b/drivers/char/mem.c
38940index 297110c..3f69b43 100644
38941--- a/drivers/char/mem.c
38942+++ b/drivers/char/mem.c
38943@@ -18,6 +18,7 @@
38944 #include <linux/raw.h>
38945 #include <linux/tty.h>
38946 #include <linux/capability.h>
38947+#include <linux/security.h>
38948 #include <linux/ptrace.h>
38949 #include <linux/device.h>
38950 #include <linux/highmem.h>
38951@@ -36,6 +37,10 @@
38952
38953 #define DEVPORT_MINOR 4
38954
38955+#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
38956+extern const struct file_operations grsec_fops;
38957+#endif
38958+
38959 static inline unsigned long size_inside_page(unsigned long start,
38960 unsigned long size)
38961 {
38962@@ -67,9 +72,13 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
38963
38964 while (cursor < to) {
38965 if (!devmem_is_allowed(pfn)) {
38966+#ifdef CONFIG_GRKERNSEC_KMEM
38967+ gr_handle_mem_readwrite(from, to);
38968+#else
38969 printk(KERN_INFO
38970 "Program %s tried to access /dev/mem between %Lx->%Lx.\n",
38971 current->comm, from, to);
38972+#endif
38973 return 0;
38974 }
38975 cursor += PAGE_SIZE;
38976@@ -77,6 +86,11 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
38977 }
38978 return 1;
38979 }
38980+#elif defined(CONFIG_GRKERNSEC_KMEM)
38981+static inline int range_is_allowed(unsigned long pfn, unsigned long size)
38982+{
38983+ return 0;
38984+}
38985 #else
38986 static inline int range_is_allowed(unsigned long pfn, unsigned long size)
38987 {
38988@@ -124,7 +138,8 @@ static ssize_t read_mem(struct file *file, char __user *buf,
38989 #endif
38990
38991 while (count > 0) {
38992- unsigned long remaining;
38993+ unsigned long remaining = 0;
38994+ char *temp;
38995
38996 sz = size_inside_page(p, count);
38997
38998@@ -140,7 +155,24 @@ static ssize_t read_mem(struct file *file, char __user *buf,
38999 if (!ptr)
39000 return -EFAULT;
39001
39002- remaining = copy_to_user(buf, ptr, sz);
39003+#ifdef CONFIG_PAX_USERCOPY
39004+ temp = kmalloc(sz, GFP_KERNEL|GFP_USERCOPY);
39005+ if (!temp) {
39006+ unxlate_dev_mem_ptr(p, ptr);
39007+ return -ENOMEM;
39008+ }
39009+ remaining = probe_kernel_read(temp, ptr, sz);
39010+#else
39011+ temp = ptr;
39012+#endif
39013+
39014+ if (!remaining)
39015+ remaining = copy_to_user(buf, temp, sz);
39016+
39017+#ifdef CONFIG_PAX_USERCOPY
39018+ kfree(temp);
39019+#endif
39020+
39021 unxlate_dev_mem_ptr(p, ptr);
39022 if (remaining)
39023 return -EFAULT;
39024@@ -380,9 +412,8 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
39025 size_t count, loff_t *ppos)
39026 {
39027 unsigned long p = *ppos;
39028- ssize_t low_count, read, sz;
39029+ ssize_t low_count, read, sz, err = 0;
39030 char *kbuf; /* k-addr because vread() takes vmlist_lock rwlock */
39031- int err = 0;
39032
39033 read = 0;
39034 if (p < (unsigned long) high_memory) {
39035@@ -404,6 +435,8 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
39036 }
39037 #endif
39038 while (low_count > 0) {
39039+ char *temp;
39040+
39041 sz = size_inside_page(p, low_count);
39042
39043 /*
39044@@ -413,7 +446,23 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
39045 */
39046 kbuf = xlate_dev_kmem_ptr((void *)p);
39047
39048- if (copy_to_user(buf, kbuf, sz))
39049+#ifdef CONFIG_PAX_USERCOPY
39050+ temp = kmalloc(sz, GFP_KERNEL|GFP_USERCOPY);
39051+ if (!temp)
39052+ return -ENOMEM;
39053+ err = probe_kernel_read(temp, kbuf, sz);
39054+#else
39055+ temp = kbuf;
39056+#endif
39057+
39058+ if (!err)
39059+ err = copy_to_user(buf, temp, sz);
39060+
39061+#ifdef CONFIG_PAX_USERCOPY
39062+ kfree(temp);
39063+#endif
39064+
39065+ if (err)
39066 return -EFAULT;
39067 buf += sz;
39068 p += sz;
39069@@ -804,6 +853,9 @@ static const struct memdev {
39070 #ifdef CONFIG_PRINTK
39071 [11] = { "kmsg", 0644, &kmsg_fops, 0 },
39072 #endif
39073+#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
39074+ [13] = { "grsec",S_IRUSR | S_IWUGO, &grsec_fops, 0 },
39075+#endif
39076 };
39077
39078 static int memory_open(struct inode *inode, struct file *filp)
39079@@ -865,7 +917,7 @@ static int __init chr_dev_init(void)
39080 continue;
39081
39082 device_create(mem_class, NULL, MKDEV(MEM_MAJOR, minor),
39083- NULL, devlist[minor].name);
39084+ NULL, "%s", devlist[minor].name);
39085 }
39086
39087 return tty_init();
39088diff --git a/drivers/char/nvram.c b/drivers/char/nvram.c
39089index 9df78e2..01ba9ae 100644
39090--- a/drivers/char/nvram.c
39091+++ b/drivers/char/nvram.c
39092@@ -247,7 +247,7 @@ static ssize_t nvram_read(struct file *file, char __user *buf,
39093
39094 spin_unlock_irq(&rtc_lock);
39095
39096- if (copy_to_user(buf, contents, tmp - contents))
39097+ if (tmp - contents > sizeof(contents) || copy_to_user(buf, contents, tmp - contents))
39098 return -EFAULT;
39099
39100 *ppos = i;
39101diff --git a/drivers/char/pcmcia/synclink_cs.c b/drivers/char/pcmcia/synclink_cs.c
39102index 0ea9986..e7b07e4 100644
39103--- a/drivers/char/pcmcia/synclink_cs.c
39104+++ b/drivers/char/pcmcia/synclink_cs.c
39105@@ -2345,7 +2345,7 @@ static void mgslpc_close(struct tty_struct *tty, struct file * filp)
39106
39107 if (debug_level >= DEBUG_LEVEL_INFO)
39108 printk("%s(%d):mgslpc_close(%s) entry, count=%d\n",
39109- __FILE__, __LINE__, info->device_name, port->count);
39110+ __FILE__, __LINE__, info->device_name, atomic_read(&port->count));
39111
39112 if (tty_port_close_start(port, tty, filp) == 0)
39113 goto cleanup;
39114@@ -2363,7 +2363,7 @@ static void mgslpc_close(struct tty_struct *tty, struct file * filp)
39115 cleanup:
39116 if (debug_level >= DEBUG_LEVEL_INFO)
39117 printk("%s(%d):mgslpc_close(%s) exit, count=%d\n", __FILE__, __LINE__,
39118- tty->driver->name, port->count);
39119+ tty->driver->name, atomic_read(&port->count));
39120 }
39121
39122 /* Wait until the transmitter is empty.
39123@@ -2505,7 +2505,7 @@ static int mgslpc_open(struct tty_struct *tty, struct file * filp)
39124
39125 if (debug_level >= DEBUG_LEVEL_INFO)
39126 printk("%s(%d):mgslpc_open(%s), old ref count = %d\n",
39127- __FILE__, __LINE__, tty->driver->name, port->count);
39128+ __FILE__, __LINE__, tty->driver->name, atomic_read(&port->count));
39129
39130 /* If port is closing, signal caller to try again */
39131 if (port->flags & ASYNC_CLOSING){
39132@@ -2525,11 +2525,11 @@ static int mgslpc_open(struct tty_struct *tty, struct file * filp)
39133 goto cleanup;
39134 }
39135 spin_lock(&port->lock);
39136- port->count++;
39137+ atomic_inc(&port->count);
39138 spin_unlock(&port->lock);
39139 spin_unlock_irqrestore(&info->netlock, flags);
39140
39141- if (port->count == 1) {
39142+ if (atomic_read(&port->count) == 1) {
39143 /* 1st open on this device, init hardware */
39144 retval = startup(info, tty);
39145 if (retval < 0)
39146@@ -3918,7 +3918,7 @@ static int hdlcdev_attach(struct net_device *dev, unsigned short encoding,
39147 unsigned short new_crctype;
39148
39149 /* return error if TTY interface open */
39150- if (info->port.count)
39151+ if (atomic_read(&info->port.count))
39152 return -EBUSY;
39153
39154 switch (encoding)
39155@@ -4022,7 +4022,7 @@ static int hdlcdev_open(struct net_device *dev)
39156
39157 /* arbitrate between network and tty opens */
39158 spin_lock_irqsave(&info->netlock, flags);
39159- if (info->port.count != 0 || info->netcount != 0) {
39160+ if (atomic_read(&info->port.count) != 0 || info->netcount != 0) {
39161 printk(KERN_WARNING "%s: hdlc_open returning busy\n", dev->name);
39162 spin_unlock_irqrestore(&info->netlock, flags);
39163 return -EBUSY;
39164@@ -4112,7 +4112,7 @@ static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
39165 printk("%s:hdlcdev_ioctl(%s)\n", __FILE__, dev->name);
39166
39167 /* return error if TTY interface open */
39168- if (info->port.count)
39169+ if (atomic_read(&info->port.count))
39170 return -EBUSY;
39171
39172 if (cmd != SIOCWANDEV)
39173diff --git a/drivers/char/random.c b/drivers/char/random.c
39174index 9cd6968..6416f00 100644
39175--- a/drivers/char/random.c
39176+++ b/drivers/char/random.c
39177@@ -289,9 +289,6 @@
39178 /*
39179 * To allow fractional bits to be tracked, the entropy_count field is
39180 * denominated in units of 1/8th bits.
39181- *
39182- * 2*(ENTROPY_SHIFT + log2(poolbits)) must <= 31, or the multiply in
39183- * credit_entropy_bits() needs to be 64 bits wide.
39184 */
39185 #define ENTROPY_SHIFT 3
39186 #define ENTROPY_BITS(r) ((r)->entropy_count >> ENTROPY_SHIFT)
39187@@ -439,9 +436,9 @@ struct entropy_store {
39188 };
39189
39190 static void push_to_pool(struct work_struct *work);
39191-static __u32 input_pool_data[INPUT_POOL_WORDS];
39192-static __u32 blocking_pool_data[OUTPUT_POOL_WORDS];
39193-static __u32 nonblocking_pool_data[OUTPUT_POOL_WORDS];
39194+static __u32 input_pool_data[INPUT_POOL_WORDS] __latent_entropy;
39195+static __u32 blocking_pool_data[OUTPUT_POOL_WORDS] __latent_entropy;
39196+static __u32 nonblocking_pool_data[OUTPUT_POOL_WORDS] __latent_entropy;
39197
39198 static struct entropy_store input_pool = {
39199 .poolinfo = &poolinfo_table[0],
39200@@ -635,7 +632,7 @@ retry:
39201 /* The +2 corresponds to the /4 in the denominator */
39202
39203 do {
39204- unsigned int anfrac = min(pnfrac, pool_size/2);
39205+ u64 anfrac = min(pnfrac, pool_size/2);
39206 unsigned int add =
39207 ((pool_size - entropy_count)*anfrac*3) >> s;
39208
39209@@ -1207,7 +1204,7 @@ static ssize_t extract_entropy_user(struct entropy_store *r, void __user *buf,
39210
39211 extract_buf(r, tmp);
39212 i = min_t(int, nbytes, EXTRACT_SIZE);
39213- if (copy_to_user(buf, tmp, i)) {
39214+ if (i > sizeof(tmp) || copy_to_user(buf, tmp, i)) {
39215 ret = -EFAULT;
39216 break;
39217 }
39218@@ -1590,7 +1587,7 @@ static char sysctl_bootid[16];
39219 static int proc_do_uuid(struct ctl_table *table, int write,
39220 void __user *buffer, size_t *lenp, loff_t *ppos)
39221 {
39222- struct ctl_table fake_table;
39223+ ctl_table_no_const fake_table;
39224 unsigned char buf[64], tmp_uuid[16], *uuid;
39225
39226 uuid = table->data;
39227@@ -1620,7 +1617,7 @@ static int proc_do_uuid(struct ctl_table *table, int write,
39228 static int proc_do_entropy(struct ctl_table *table, int write,
39229 void __user *buffer, size_t *lenp, loff_t *ppos)
39230 {
39231- struct ctl_table fake_table;
39232+ ctl_table_no_const fake_table;
39233 int entropy_count;
39234
39235 entropy_count = *(int *)table->data >> ENTROPY_SHIFT;
39236diff --git a/drivers/char/sonypi.c b/drivers/char/sonypi.c
39237index e496dae..3db53b6 100644
39238--- a/drivers/char/sonypi.c
39239+++ b/drivers/char/sonypi.c
39240@@ -54,6 +54,7 @@
39241
39242 #include <asm/uaccess.h>
39243 #include <asm/io.h>
39244+#include <asm/local.h>
39245
39246 #include <linux/sonypi.h>
39247
39248@@ -490,7 +491,7 @@ static struct sonypi_device {
39249 spinlock_t fifo_lock;
39250 wait_queue_head_t fifo_proc_list;
39251 struct fasync_struct *fifo_async;
39252- int open_count;
39253+ local_t open_count;
39254 int model;
39255 struct input_dev *input_jog_dev;
39256 struct input_dev *input_key_dev;
39257@@ -892,7 +893,7 @@ static int sonypi_misc_fasync(int fd, struct file *filp, int on)
39258 static int sonypi_misc_release(struct inode *inode, struct file *file)
39259 {
39260 mutex_lock(&sonypi_device.lock);
39261- sonypi_device.open_count--;
39262+ local_dec(&sonypi_device.open_count);
39263 mutex_unlock(&sonypi_device.lock);
39264 return 0;
39265 }
39266@@ -901,9 +902,9 @@ static int sonypi_misc_open(struct inode *inode, struct file *file)
39267 {
39268 mutex_lock(&sonypi_device.lock);
39269 /* Flush input queue on first open */
39270- if (!sonypi_device.open_count)
39271+ if (!local_read(&sonypi_device.open_count))
39272 kfifo_reset(&sonypi_device.fifo);
39273- sonypi_device.open_count++;
39274+ local_inc(&sonypi_device.open_count);
39275 mutex_unlock(&sonypi_device.lock);
39276
39277 return 0;
39278@@ -1491,7 +1492,7 @@ static struct platform_driver sonypi_driver = {
39279
39280 static struct platform_device *sonypi_platform_device;
39281
39282-static struct dmi_system_id __initdata sonypi_dmi_table[] = {
39283+static const struct dmi_system_id __initconst sonypi_dmi_table[] = {
39284 {
39285 .ident = "Sony Vaio",
39286 .matches = {
39287diff --git a/drivers/char/tpm/tpm_acpi.c b/drivers/char/tpm/tpm_acpi.c
39288index 565a947..dcdc06e 100644
39289--- a/drivers/char/tpm/tpm_acpi.c
39290+++ b/drivers/char/tpm/tpm_acpi.c
39291@@ -98,11 +98,12 @@ int read_log(struct tpm_bios_log *log)
39292 virt = acpi_os_map_iomem(start, len);
39293 if (!virt) {
39294 kfree(log->bios_event_log);
39295+ log->bios_event_log = NULL;
39296 printk("%s: ERROR - Unable to map memory\n", __func__);
39297 return -EIO;
39298 }
39299
39300- memcpy_fromio(log->bios_event_log, virt, len);
39301+ memcpy_fromio(log->bios_event_log, (const char __force_kernel *)virt, len);
39302
39303 acpi_os_unmap_iomem(virt, len);
39304 return 0;
39305diff --git a/drivers/char/tpm/tpm_eventlog.c b/drivers/char/tpm/tpm_eventlog.c
39306index 3a56a13..f8cbd25 100644
39307--- a/drivers/char/tpm/tpm_eventlog.c
39308+++ b/drivers/char/tpm/tpm_eventlog.c
39309@@ -95,7 +95,7 @@ static void *tpm_bios_measurements_start(struct seq_file *m, loff_t *pos)
39310 event = addr;
39311
39312 if ((event->event_type == 0 && event->event_size == 0) ||
39313- ((addr + sizeof(struct tcpa_event) + event->event_size) >= limit))
39314+ (event->event_size >= limit - addr - sizeof(struct tcpa_event)))
39315 return NULL;
39316
39317 return addr;
39318@@ -120,7 +120,7 @@ static void *tpm_bios_measurements_next(struct seq_file *m, void *v,
39319 return NULL;
39320
39321 if ((event->event_type == 0 && event->event_size == 0) ||
39322- ((v + sizeof(struct tcpa_event) + event->event_size) >= limit))
39323+ (event->event_size >= limit - v - sizeof(struct tcpa_event)))
39324 return NULL;
39325
39326 (*pos)++;
39327@@ -213,7 +213,8 @@ static int tpm_binary_bios_measurements_show(struct seq_file *m, void *v)
39328 int i;
39329
39330 for (i = 0; i < sizeof(struct tcpa_event) + event->event_size; i++)
39331- seq_putc(m, data[i]);
39332+ if (!seq_putc(m, data[i]))
39333+ return -EFAULT;
39334
39335 return 0;
39336 }
39337diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
39338index 72d7028..1586601 100644
39339--- a/drivers/char/virtio_console.c
39340+++ b/drivers/char/virtio_console.c
39341@@ -685,7 +685,7 @@ static ssize_t fill_readbuf(struct port *port, char __user *out_buf,
39342 if (to_user) {
39343 ssize_t ret;
39344
39345- ret = copy_to_user(out_buf, buf->buf + buf->offset, out_count);
39346+ ret = copy_to_user((char __force_user *)out_buf, buf->buf + buf->offset, out_count);
39347 if (ret)
39348 return -EFAULT;
39349 } else {
39350@@ -789,7 +789,7 @@ static ssize_t port_fops_read(struct file *filp, char __user *ubuf,
39351 if (!port_has_data(port) && !port->host_connected)
39352 return 0;
39353
39354- return fill_readbuf(port, ubuf, count, true);
39355+ return fill_readbuf(port, (char __force_kernel *)ubuf, count, true);
39356 }
39357
39358 static int wait_port_writable(struct port *port, bool nonblock)
39359diff --git a/drivers/clk/clk-composite.c b/drivers/clk/clk-composite.c
39360index 956b7e5..b655045 100644
39361--- a/drivers/clk/clk-composite.c
39362+++ b/drivers/clk/clk-composite.c
39363@@ -197,7 +197,7 @@ struct clk *clk_register_composite(struct device *dev, const char *name,
39364 struct clk *clk;
39365 struct clk_init_data init;
39366 struct clk_composite *composite;
39367- struct clk_ops *clk_composite_ops;
39368+ clk_ops_no_const *clk_composite_ops;
39369
39370 composite = kzalloc(sizeof(*composite), GFP_KERNEL);
39371 if (!composite) {
39372diff --git a/drivers/clk/socfpga/clk-gate.c b/drivers/clk/socfpga/clk-gate.c
39373index dd3a78c..386d49c 100644
39374--- a/drivers/clk/socfpga/clk-gate.c
39375+++ b/drivers/clk/socfpga/clk-gate.c
39376@@ -22,6 +22,7 @@
39377 #include <linux/mfd/syscon.h>
39378 #include <linux/of.h>
39379 #include <linux/regmap.h>
39380+#include <asm/pgtable.h>
39381
39382 #include "clk.h"
39383
39384@@ -174,7 +175,7 @@ static int socfpga_clk_prepare(struct clk_hw *hwclk)
39385 return 0;
39386 }
39387
39388-static struct clk_ops gateclk_ops = {
39389+static clk_ops_no_const gateclk_ops __read_only = {
39390 .prepare = socfpga_clk_prepare,
39391 .recalc_rate = socfpga_clk_recalc_rate,
39392 .get_parent = socfpga_clk_get_parent,
39393@@ -208,8 +209,10 @@ static void __init __socfpga_gate_init(struct device_node *node,
39394 socfpga_clk->hw.reg = clk_mgr_base_addr + clk_gate[0];
39395 socfpga_clk->hw.bit_idx = clk_gate[1];
39396
39397- gateclk_ops.enable = clk_gate_ops.enable;
39398- gateclk_ops.disable = clk_gate_ops.disable;
39399+ pax_open_kernel();
39400+ *(void **)&gateclk_ops.enable = clk_gate_ops.enable;
39401+ *(void **)&gateclk_ops.disable = clk_gate_ops.disable;
39402+ pax_close_kernel();
39403 }
39404
39405 rc = of_property_read_u32(node, "fixed-divider", &fixed_div);
39406diff --git a/drivers/clk/socfpga/clk-pll.c b/drivers/clk/socfpga/clk-pll.c
39407index de6da95..c98278b 100644
39408--- a/drivers/clk/socfpga/clk-pll.c
39409+++ b/drivers/clk/socfpga/clk-pll.c
39410@@ -21,6 +21,7 @@
39411 #include <linux/io.h>
39412 #include <linux/of.h>
39413 #include <linux/of_address.h>
39414+#include <asm/pgtable.h>
39415
39416 #include "clk.h"
39417
39418@@ -76,7 +77,7 @@ static u8 clk_pll_get_parent(struct clk_hw *hwclk)
39419 CLK_MGR_PLL_CLK_SRC_MASK;
39420 }
39421
39422-static struct clk_ops clk_pll_ops = {
39423+static clk_ops_no_const clk_pll_ops __read_only = {
39424 .recalc_rate = clk_pll_recalc_rate,
39425 .get_parent = clk_pll_get_parent,
39426 };
39427@@ -120,8 +121,10 @@ static __init struct clk *__socfpga_pll_init(struct device_node *node,
39428 pll_clk->hw.hw.init = &init;
39429
39430 pll_clk->hw.bit_idx = SOCFPGA_PLL_EXT_ENA;
39431- clk_pll_ops.enable = clk_gate_ops.enable;
39432- clk_pll_ops.disable = clk_gate_ops.disable;
39433+ pax_open_kernel();
39434+ *(void **)&clk_pll_ops.enable = clk_gate_ops.enable;
39435+ *(void **)&clk_pll_ops.disable = clk_gate_ops.disable;
39436+ pax_close_kernel();
39437
39438 clk = clk_register(NULL, &pll_clk->hw.hw);
39439 if (WARN_ON(IS_ERR(clk))) {
39440diff --git a/drivers/cpufreq/acpi-cpufreq.c b/drivers/cpufreq/acpi-cpufreq.c
39441index b0c18ed..1713a80 100644
39442--- a/drivers/cpufreq/acpi-cpufreq.c
39443+++ b/drivers/cpufreq/acpi-cpufreq.c
39444@@ -675,8 +675,11 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
39445 data->acpi_data = per_cpu_ptr(acpi_perf_data, cpu);
39446 per_cpu(acfreq_data, cpu) = data;
39447
39448- if (cpu_has(c, X86_FEATURE_CONSTANT_TSC))
39449- acpi_cpufreq_driver.flags |= CPUFREQ_CONST_LOOPS;
39450+ if (cpu_has(c, X86_FEATURE_CONSTANT_TSC)) {
39451+ pax_open_kernel();
39452+ *(u8 *)&acpi_cpufreq_driver.flags |= CPUFREQ_CONST_LOOPS;
39453+ pax_close_kernel();
39454+ }
39455
39456 result = acpi_processor_register_performance(data->acpi_data, cpu);
39457 if (result)
39458@@ -809,7 +812,9 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
39459 policy->cur = acpi_cpufreq_guess_freq(data, policy->cpu);
39460 break;
39461 case ACPI_ADR_SPACE_FIXED_HARDWARE:
39462- acpi_cpufreq_driver.get = get_cur_freq_on_cpu;
39463+ pax_open_kernel();
39464+ *(void **)&acpi_cpufreq_driver.get = get_cur_freq_on_cpu;
39465+ pax_close_kernel();
39466 break;
39467 default:
39468 break;
39469@@ -903,8 +908,10 @@ static void __init acpi_cpufreq_boost_init(void)
39470 if (!msrs)
39471 return;
39472
39473- acpi_cpufreq_driver.boost_supported = true;
39474- acpi_cpufreq_driver.boost_enabled = boost_state(0);
39475+ pax_open_kernel();
39476+ *(bool *)&acpi_cpufreq_driver.boost_supported = true;
39477+ *(bool *)&acpi_cpufreq_driver.boost_enabled = boost_state(0);
39478+ pax_close_kernel();
39479
39480 cpu_notifier_register_begin();
39481
39482diff --git a/drivers/cpufreq/cpufreq-dt.c b/drivers/cpufreq/cpufreq-dt.c
39483index bab67db..91af7e3 100644
39484--- a/drivers/cpufreq/cpufreq-dt.c
39485+++ b/drivers/cpufreq/cpufreq-dt.c
39486@@ -392,7 +392,9 @@ static int dt_cpufreq_probe(struct platform_device *pdev)
39487 if (!IS_ERR(cpu_reg))
39488 regulator_put(cpu_reg);
39489
39490- dt_cpufreq_driver.driver_data = dev_get_platdata(&pdev->dev);
39491+ pax_open_kernel();
39492+ *(void **)&dt_cpufreq_driver.driver_data = dev_get_platdata(&pdev->dev);
39493+ pax_close_kernel();
39494
39495 ret = cpufreq_register_driver(&dt_cpufreq_driver);
39496 if (ret)
39497diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
39498index 8ae655c..3141442 100644
39499--- a/drivers/cpufreq/cpufreq.c
39500+++ b/drivers/cpufreq/cpufreq.c
39501@@ -2108,7 +2108,7 @@ void cpufreq_unregister_governor(struct cpufreq_governor *governor)
39502 }
39503
39504 mutex_lock(&cpufreq_governor_mutex);
39505- list_del(&governor->governor_list);
39506+ pax_list_del(&governor->governor_list);
39507 mutex_unlock(&cpufreq_governor_mutex);
39508 return;
39509 }
39510@@ -2323,7 +2323,7 @@ static int cpufreq_cpu_callback(struct notifier_block *nfb,
39511 return NOTIFY_OK;
39512 }
39513
39514-static struct notifier_block __refdata cpufreq_cpu_notifier = {
39515+static struct notifier_block cpufreq_cpu_notifier = {
39516 .notifier_call = cpufreq_cpu_callback,
39517 };
39518
39519@@ -2363,13 +2363,17 @@ int cpufreq_boost_trigger_state(int state)
39520 return 0;
39521
39522 write_lock_irqsave(&cpufreq_driver_lock, flags);
39523- cpufreq_driver->boost_enabled = state;
39524+ pax_open_kernel();
39525+ *(bool *)&cpufreq_driver->boost_enabled = state;
39526+ pax_close_kernel();
39527 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
39528
39529 ret = cpufreq_driver->set_boost(state);
39530 if (ret) {
39531 write_lock_irqsave(&cpufreq_driver_lock, flags);
39532- cpufreq_driver->boost_enabled = !state;
39533+ pax_open_kernel();
39534+ *(bool *)&cpufreq_driver->boost_enabled = !state;
39535+ pax_close_kernel();
39536 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
39537
39538 pr_err("%s: Cannot %s BOOST\n",
39539@@ -2434,16 +2438,22 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data)
39540 cpufreq_driver = driver_data;
39541 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
39542
39543- if (driver_data->setpolicy)
39544- driver_data->flags |= CPUFREQ_CONST_LOOPS;
39545+ if (driver_data->setpolicy) {
39546+ pax_open_kernel();
39547+ *(u8 *)&driver_data->flags |= CPUFREQ_CONST_LOOPS;
39548+ pax_close_kernel();
39549+ }
39550
39551 if (cpufreq_boost_supported()) {
39552 /*
39553 * Check if driver provides function to enable boost -
39554 * if not, use cpufreq_boost_set_sw as default
39555 */
39556- if (!cpufreq_driver->set_boost)
39557- cpufreq_driver->set_boost = cpufreq_boost_set_sw;
39558+ if (!cpufreq_driver->set_boost) {
39559+ pax_open_kernel();
39560+ *(void **)&cpufreq_driver->set_boost = cpufreq_boost_set_sw;
39561+ pax_close_kernel();
39562+ }
39563
39564 ret = cpufreq_sysfs_create_file(&boost.attr);
39565 if (ret) {
39566diff --git a/drivers/cpufreq/cpufreq_governor.c b/drivers/cpufreq/cpufreq_governor.c
39567index 1b44496..b80ff5e 100644
39568--- a/drivers/cpufreq/cpufreq_governor.c
39569+++ b/drivers/cpufreq/cpufreq_governor.c
39570@@ -245,7 +245,7 @@ int cpufreq_governor_dbs(struct cpufreq_policy *policy,
39571 struct dbs_data *dbs_data;
39572 struct od_cpu_dbs_info_s *od_dbs_info = NULL;
39573 struct cs_cpu_dbs_info_s *cs_dbs_info = NULL;
39574- struct od_ops *od_ops = NULL;
39575+ const struct od_ops *od_ops = NULL;
39576 struct od_dbs_tuners *od_tuners = NULL;
39577 struct cs_dbs_tuners *cs_tuners = NULL;
39578 struct cpu_dbs_common_info *cpu_cdbs;
39579@@ -311,7 +311,7 @@ int cpufreq_governor_dbs(struct cpufreq_policy *policy,
39580
39581 if ((cdata->governor == GOV_CONSERVATIVE) &&
39582 (!policy->governor->initialized)) {
39583- struct cs_ops *cs_ops = dbs_data->cdata->gov_ops;
39584+ const struct cs_ops *cs_ops = dbs_data->cdata->gov_ops;
39585
39586 cpufreq_register_notifier(cs_ops->notifier_block,
39587 CPUFREQ_TRANSITION_NOTIFIER);
39588@@ -331,7 +331,7 @@ int cpufreq_governor_dbs(struct cpufreq_policy *policy,
39589
39590 if ((dbs_data->cdata->governor == GOV_CONSERVATIVE) &&
39591 (policy->governor->initialized == 1)) {
39592- struct cs_ops *cs_ops = dbs_data->cdata->gov_ops;
39593+ const struct cs_ops *cs_ops = dbs_data->cdata->gov_ops;
39594
39595 cpufreq_unregister_notifier(cs_ops->notifier_block,
39596 CPUFREQ_TRANSITION_NOTIFIER);
39597diff --git a/drivers/cpufreq/cpufreq_governor.h b/drivers/cpufreq/cpufreq_governor.h
39598index cc401d1..8197340 100644
39599--- a/drivers/cpufreq/cpufreq_governor.h
39600+++ b/drivers/cpufreq/cpufreq_governor.h
39601@@ -212,7 +212,7 @@ struct common_dbs_data {
39602 void (*exit)(struct dbs_data *dbs_data);
39603
39604 /* Governor specific ops, see below */
39605- void *gov_ops;
39606+ const void *gov_ops;
39607 };
39608
39609 /* Governor Per policy data */
39610@@ -232,7 +232,7 @@ struct od_ops {
39611 unsigned int (*powersave_bias_target)(struct cpufreq_policy *policy,
39612 unsigned int freq_next, unsigned int relation);
39613 void (*freq_increase)(struct cpufreq_policy *policy, unsigned int freq);
39614-};
39615+} __no_const;
39616
39617 struct cs_ops {
39618 struct notifier_block *notifier_block;
39619diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
39620index ad3f38f..8f086cd 100644
39621--- a/drivers/cpufreq/cpufreq_ondemand.c
39622+++ b/drivers/cpufreq/cpufreq_ondemand.c
39623@@ -524,7 +524,7 @@ static void od_exit(struct dbs_data *dbs_data)
39624
39625 define_get_cpu_dbs_routines(od_cpu_dbs_info);
39626
39627-static struct od_ops od_ops = {
39628+static struct od_ops od_ops __read_only = {
39629 .powersave_bias_init_cpu = ondemand_powersave_bias_init_cpu,
39630 .powersave_bias_target = generic_powersave_bias_target,
39631 .freq_increase = dbs_freq_increase,
39632@@ -579,14 +579,18 @@ void od_register_powersave_bias_handler(unsigned int (*f)
39633 (struct cpufreq_policy *, unsigned int, unsigned int),
39634 unsigned int powersave_bias)
39635 {
39636- od_ops.powersave_bias_target = f;
39637+ pax_open_kernel();
39638+ *(void **)&od_ops.powersave_bias_target = f;
39639+ pax_close_kernel();
39640 od_set_powersave_bias(powersave_bias);
39641 }
39642 EXPORT_SYMBOL_GPL(od_register_powersave_bias_handler);
39643
39644 void od_unregister_powersave_bias_handler(void)
39645 {
39646- od_ops.powersave_bias_target = generic_powersave_bias_target;
39647+ pax_open_kernel();
39648+ *(void **)&od_ops.powersave_bias_target = generic_powersave_bias_target;
39649+ pax_close_kernel();
39650 od_set_powersave_bias(0);
39651 }
39652 EXPORT_SYMBOL_GPL(od_unregister_powersave_bias_handler);
39653diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
39654index 872c577..5fb3c20 100644
39655--- a/drivers/cpufreq/intel_pstate.c
39656+++ b/drivers/cpufreq/intel_pstate.c
39657@@ -133,10 +133,10 @@ struct pstate_funcs {
39658 struct cpu_defaults {
39659 struct pstate_adjust_policy pid_policy;
39660 struct pstate_funcs funcs;
39661-};
39662+} __do_const;
39663
39664 static struct pstate_adjust_policy pid_params;
39665-static struct pstate_funcs pstate_funcs;
39666+static struct pstate_funcs *pstate_funcs;
39667 static int hwp_active;
39668
39669 struct perf_limits {
39670@@ -690,18 +690,18 @@ static void intel_pstate_set_pstate(struct cpudata *cpu, int pstate)
39671
39672 cpu->pstate.current_pstate = pstate;
39673
39674- pstate_funcs.set(cpu, pstate);
39675+ pstate_funcs->set(cpu, pstate);
39676 }
39677
39678 static void intel_pstate_get_cpu_pstates(struct cpudata *cpu)
39679 {
39680- cpu->pstate.min_pstate = pstate_funcs.get_min();
39681- cpu->pstate.max_pstate = pstate_funcs.get_max();
39682- cpu->pstate.turbo_pstate = pstate_funcs.get_turbo();
39683- cpu->pstate.scaling = pstate_funcs.get_scaling();
39684+ cpu->pstate.min_pstate = pstate_funcs->get_min();
39685+ cpu->pstate.max_pstate = pstate_funcs->get_max();
39686+ cpu->pstate.turbo_pstate = pstate_funcs->get_turbo();
39687+ cpu->pstate.scaling = pstate_funcs->get_scaling();
39688
39689- if (pstate_funcs.get_vid)
39690- pstate_funcs.get_vid(cpu);
39691+ if (pstate_funcs->get_vid)
39692+ pstate_funcs->get_vid(cpu);
39693 intel_pstate_set_pstate(cpu, cpu->pstate.min_pstate);
39694 }
39695
39696@@ -1030,9 +1030,9 @@ static int intel_pstate_msrs_not_valid(void)
39697 rdmsrl(MSR_IA32_APERF, aperf);
39698 rdmsrl(MSR_IA32_MPERF, mperf);
39699
39700- if (!pstate_funcs.get_max() ||
39701- !pstate_funcs.get_min() ||
39702- !pstate_funcs.get_turbo())
39703+ if (!pstate_funcs->get_max() ||
39704+ !pstate_funcs->get_min() ||
39705+ !pstate_funcs->get_turbo())
39706 return -ENODEV;
39707
39708 rdmsrl(MSR_IA32_APERF, tmp);
39709@@ -1046,7 +1046,7 @@ static int intel_pstate_msrs_not_valid(void)
39710 return 0;
39711 }
39712
39713-static void copy_pid_params(struct pstate_adjust_policy *policy)
39714+static void copy_pid_params(const struct pstate_adjust_policy *policy)
39715 {
39716 pid_params.sample_rate_ms = policy->sample_rate_ms;
39717 pid_params.p_gain_pct = policy->p_gain_pct;
39718@@ -1058,12 +1058,7 @@ static void copy_pid_params(struct pstate_adjust_policy *policy)
39719
39720 static void copy_cpu_funcs(struct pstate_funcs *funcs)
39721 {
39722- pstate_funcs.get_max = funcs->get_max;
39723- pstate_funcs.get_min = funcs->get_min;
39724- pstate_funcs.get_turbo = funcs->get_turbo;
39725- pstate_funcs.get_scaling = funcs->get_scaling;
39726- pstate_funcs.set = funcs->set;
39727- pstate_funcs.get_vid = funcs->get_vid;
39728+ pstate_funcs = funcs;
39729 }
39730
39731 #if IS_ENABLED(CONFIG_ACPI)
39732diff --git a/drivers/cpufreq/p4-clockmod.c b/drivers/cpufreq/p4-clockmod.c
39733index 529cfd9..0e28fff 100644
39734--- a/drivers/cpufreq/p4-clockmod.c
39735+++ b/drivers/cpufreq/p4-clockmod.c
39736@@ -134,10 +134,14 @@ static unsigned int cpufreq_p4_get_frequency(struct cpuinfo_x86 *c)
39737 case 0x0F: /* Core Duo */
39738 case 0x16: /* Celeron Core */
39739 case 0x1C: /* Atom */
39740- p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
39741+ pax_open_kernel();
39742+ *(u8 *)&p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
39743+ pax_close_kernel();
39744 return speedstep_get_frequency(SPEEDSTEP_CPU_PCORE);
39745 case 0x0D: /* Pentium M (Dothan) */
39746- p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
39747+ pax_open_kernel();
39748+ *(u8 *)&p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
39749+ pax_close_kernel();
39750 /* fall through */
39751 case 0x09: /* Pentium M (Banias) */
39752 return speedstep_get_frequency(SPEEDSTEP_CPU_PM);
39753@@ -149,7 +153,9 @@ static unsigned int cpufreq_p4_get_frequency(struct cpuinfo_x86 *c)
39754
39755 /* on P-4s, the TSC runs with constant frequency independent whether
39756 * throttling is active or not. */
39757- p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
39758+ pax_open_kernel();
39759+ *(u8 *)&p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
39760+ pax_close_kernel();
39761
39762 if (speedstep_detect_processor() == SPEEDSTEP_CPU_P4M) {
39763 printk(KERN_WARNING PFX "Warning: Pentium 4-M detected. "
39764diff --git a/drivers/cpufreq/sparc-us3-cpufreq.c b/drivers/cpufreq/sparc-us3-cpufreq.c
39765index 9bb42ba..b01b4a2 100644
39766--- a/drivers/cpufreq/sparc-us3-cpufreq.c
39767+++ b/drivers/cpufreq/sparc-us3-cpufreq.c
39768@@ -18,14 +18,12 @@
39769 #include <asm/head.h>
39770 #include <asm/timer.h>
39771
39772-static struct cpufreq_driver *cpufreq_us3_driver;
39773-
39774 struct us3_freq_percpu_info {
39775 struct cpufreq_frequency_table table[4];
39776 };
39777
39778 /* Indexed by cpu number. */
39779-static struct us3_freq_percpu_info *us3_freq_table;
39780+static struct us3_freq_percpu_info us3_freq_table[NR_CPUS];
39781
39782 /* UltraSPARC-III has three dividers: 1, 2, and 32. These are controlled
39783 * in the Safari config register.
39784@@ -156,16 +154,27 @@ static int __init us3_freq_cpu_init(struct cpufreq_policy *policy)
39785
39786 static int us3_freq_cpu_exit(struct cpufreq_policy *policy)
39787 {
39788- if (cpufreq_us3_driver)
39789- us3_freq_target(policy, 0);
39790+ us3_freq_target(policy, 0);
39791
39792 return 0;
39793 }
39794
39795+static int __init us3_freq_init(void);
39796+static void __exit us3_freq_exit(void);
39797+
39798+static struct cpufreq_driver cpufreq_us3_driver = {
39799+ .init = us3_freq_cpu_init,
39800+ .verify = cpufreq_generic_frequency_table_verify,
39801+ .target_index = us3_freq_target,
39802+ .get = us3_freq_get,
39803+ .exit = us3_freq_cpu_exit,
39804+ .name = "UltraSPARC-III",
39805+
39806+};
39807+
39808 static int __init us3_freq_init(void)
39809 {
39810 unsigned long manuf, impl, ver;
39811- int ret;
39812
39813 if (tlb_type != cheetah && tlb_type != cheetah_plus)
39814 return -ENODEV;
39815@@ -178,55 +187,15 @@ static int __init us3_freq_init(void)
39816 (impl == CHEETAH_IMPL ||
39817 impl == CHEETAH_PLUS_IMPL ||
39818 impl == JAGUAR_IMPL ||
39819- impl == PANTHER_IMPL)) {
39820- struct cpufreq_driver *driver;
39821-
39822- ret = -ENOMEM;
39823- driver = kzalloc(sizeof(*driver), GFP_KERNEL);
39824- if (!driver)
39825- goto err_out;
39826-
39827- us3_freq_table = kzalloc((NR_CPUS * sizeof(*us3_freq_table)),
39828- GFP_KERNEL);
39829- if (!us3_freq_table)
39830- goto err_out;
39831-
39832- driver->init = us3_freq_cpu_init;
39833- driver->verify = cpufreq_generic_frequency_table_verify;
39834- driver->target_index = us3_freq_target;
39835- driver->get = us3_freq_get;
39836- driver->exit = us3_freq_cpu_exit;
39837- strcpy(driver->name, "UltraSPARC-III");
39838-
39839- cpufreq_us3_driver = driver;
39840- ret = cpufreq_register_driver(driver);
39841- if (ret)
39842- goto err_out;
39843-
39844- return 0;
39845-
39846-err_out:
39847- if (driver) {
39848- kfree(driver);
39849- cpufreq_us3_driver = NULL;
39850- }
39851- kfree(us3_freq_table);
39852- us3_freq_table = NULL;
39853- return ret;
39854- }
39855+ impl == PANTHER_IMPL))
39856+ return cpufreq_register_driver(&cpufreq_us3_driver);
39857
39858 return -ENODEV;
39859 }
39860
39861 static void __exit us3_freq_exit(void)
39862 {
39863- if (cpufreq_us3_driver) {
39864- cpufreq_unregister_driver(cpufreq_us3_driver);
39865- kfree(cpufreq_us3_driver);
39866- cpufreq_us3_driver = NULL;
39867- kfree(us3_freq_table);
39868- us3_freq_table = NULL;
39869- }
39870+ cpufreq_unregister_driver(&cpufreq_us3_driver);
39871 }
39872
39873 MODULE_AUTHOR("David S. Miller <davem@redhat.com>");
39874diff --git a/drivers/cpufreq/speedstep-centrino.c b/drivers/cpufreq/speedstep-centrino.c
39875index 7d4a315..21bb886 100644
39876--- a/drivers/cpufreq/speedstep-centrino.c
39877+++ b/drivers/cpufreq/speedstep-centrino.c
39878@@ -351,8 +351,11 @@ static int centrino_cpu_init(struct cpufreq_policy *policy)
39879 !cpu_has(cpu, X86_FEATURE_EST))
39880 return -ENODEV;
39881
39882- if (cpu_has(cpu, X86_FEATURE_CONSTANT_TSC))
39883- centrino_driver.flags |= CPUFREQ_CONST_LOOPS;
39884+ if (cpu_has(cpu, X86_FEATURE_CONSTANT_TSC)) {
39885+ pax_open_kernel();
39886+ *(u8 *)&centrino_driver.flags |= CPUFREQ_CONST_LOOPS;
39887+ pax_close_kernel();
39888+ }
39889
39890 if (policy->cpu != 0)
39891 return -ENODEV;
39892diff --git a/drivers/cpuidle/driver.c b/drivers/cpuidle/driver.c
39893index 2697e87..c32476c 100644
39894--- a/drivers/cpuidle/driver.c
39895+++ b/drivers/cpuidle/driver.c
39896@@ -194,7 +194,7 @@ static int poll_idle(struct cpuidle_device *dev,
39897
39898 static void poll_idle_init(struct cpuidle_driver *drv)
39899 {
39900- struct cpuidle_state *state = &drv->states[0];
39901+ cpuidle_state_no_const *state = &drv->states[0];
39902
39903 snprintf(state->name, CPUIDLE_NAME_LEN, "POLL");
39904 snprintf(state->desc, CPUIDLE_DESC_LEN, "CPUIDLE CORE POLL IDLE");
39905diff --git a/drivers/cpuidle/governor.c b/drivers/cpuidle/governor.c
39906index fb9f511..213e6cc 100644
39907--- a/drivers/cpuidle/governor.c
39908+++ b/drivers/cpuidle/governor.c
39909@@ -87,7 +87,7 @@ int cpuidle_register_governor(struct cpuidle_governor *gov)
39910 mutex_lock(&cpuidle_lock);
39911 if (__cpuidle_find_governor(gov->name) == NULL) {
39912 ret = 0;
39913- list_add_tail(&gov->governor_list, &cpuidle_governors);
39914+ pax_list_add_tail((struct list_head *)&gov->governor_list, &cpuidle_governors);
39915 if (!cpuidle_curr_governor ||
39916 cpuidle_curr_governor->rating < gov->rating)
39917 cpuidle_switch_governor(gov);
39918diff --git a/drivers/cpuidle/sysfs.c b/drivers/cpuidle/sysfs.c
39919index 832a2c3..1794080 100644
39920--- a/drivers/cpuidle/sysfs.c
39921+++ b/drivers/cpuidle/sysfs.c
39922@@ -135,7 +135,7 @@ static struct attribute *cpuidle_switch_attrs[] = {
39923 NULL
39924 };
39925
39926-static struct attribute_group cpuidle_attr_group = {
39927+static attribute_group_no_const cpuidle_attr_group = {
39928 .attrs = cpuidle_default_attrs,
39929 .name = "cpuidle",
39930 };
39931diff --git a/drivers/crypto/hifn_795x.c b/drivers/crypto/hifn_795x.c
39932index 8d2a772..33826c9 100644
39933--- a/drivers/crypto/hifn_795x.c
39934+++ b/drivers/crypto/hifn_795x.c
39935@@ -51,7 +51,7 @@ module_param_string(hifn_pll_ref, hifn_pll_ref, sizeof(hifn_pll_ref), 0444);
39936 MODULE_PARM_DESC(hifn_pll_ref,
39937 "PLL reference clock (pci[freq] or ext[freq], default ext)");
39938
39939-static atomic_t hifn_dev_number;
39940+static atomic_unchecked_t hifn_dev_number;
39941
39942 #define ACRYPTO_OP_DECRYPT 0
39943 #define ACRYPTO_OP_ENCRYPT 1
39944@@ -2577,7 +2577,7 @@ static int hifn_probe(struct pci_dev *pdev, const struct pci_device_id *id)
39945 goto err_out_disable_pci_device;
39946
39947 snprintf(name, sizeof(name), "hifn%d",
39948- atomic_inc_return(&hifn_dev_number)-1);
39949+ atomic_inc_return_unchecked(&hifn_dev_number)-1);
39950
39951 err = pci_request_regions(pdev, name);
39952 if (err)
39953diff --git a/drivers/devfreq/devfreq.c b/drivers/devfreq/devfreq.c
39954index 30b538d8..1610d75 100644
39955--- a/drivers/devfreq/devfreq.c
39956+++ b/drivers/devfreq/devfreq.c
39957@@ -673,7 +673,7 @@ int devfreq_add_governor(struct devfreq_governor *governor)
39958 goto err_out;
39959 }
39960
39961- list_add(&governor->node, &devfreq_governor_list);
39962+ pax_list_add((struct list_head *)&governor->node, &devfreq_governor_list);
39963
39964 list_for_each_entry(devfreq, &devfreq_list, node) {
39965 int ret = 0;
39966@@ -761,7 +761,7 @@ int devfreq_remove_governor(struct devfreq_governor *governor)
39967 }
39968 }
39969
39970- list_del(&governor->node);
39971+ pax_list_del((struct list_head *)&governor->node);
39972 err_out:
39973 mutex_unlock(&devfreq_list_lock);
39974
39975diff --git a/drivers/dma/sh/shdma-base.c b/drivers/dma/sh/shdma-base.c
39976index 8ee383d..736b5de 100644
39977--- a/drivers/dma/sh/shdma-base.c
39978+++ b/drivers/dma/sh/shdma-base.c
39979@@ -228,8 +228,8 @@ static int shdma_alloc_chan_resources(struct dma_chan *chan)
39980 schan->slave_id = -EINVAL;
39981 }
39982
39983- schan->desc = kcalloc(NR_DESCS_PER_CHANNEL,
39984- sdev->desc_size, GFP_KERNEL);
39985+ schan->desc = kcalloc(sdev->desc_size,
39986+ NR_DESCS_PER_CHANNEL, GFP_KERNEL);
39987 if (!schan->desc) {
39988 ret = -ENOMEM;
39989 goto edescalloc;
39990diff --git a/drivers/dma/sh/shdmac.c b/drivers/dma/sh/shdmac.c
39991index 9f1d4c7..fceff78 100644
39992--- a/drivers/dma/sh/shdmac.c
39993+++ b/drivers/dma/sh/shdmac.c
39994@@ -513,7 +513,7 @@ static int sh_dmae_nmi_handler(struct notifier_block *self,
39995 return ret;
39996 }
39997
39998-static struct notifier_block sh_dmae_nmi_notifier __read_mostly = {
39999+static struct notifier_block sh_dmae_nmi_notifier = {
40000 .notifier_call = sh_dmae_nmi_handler,
40001
40002 /* Run before NMI debug handler and KGDB */
40003diff --git a/drivers/edac/edac_device.c b/drivers/edac/edac_device.c
40004index 592af5f..bb1d583 100644
40005--- a/drivers/edac/edac_device.c
40006+++ b/drivers/edac/edac_device.c
40007@@ -477,9 +477,9 @@ void edac_device_reset_delay_period(struct edac_device_ctl_info *edac_dev,
40008 */
40009 int edac_device_alloc_index(void)
40010 {
40011- static atomic_t device_indexes = ATOMIC_INIT(0);
40012+ static atomic_unchecked_t device_indexes = ATOMIC_INIT(0);
40013
40014- return atomic_inc_return(&device_indexes) - 1;
40015+ return atomic_inc_return_unchecked(&device_indexes) - 1;
40016 }
40017 EXPORT_SYMBOL_GPL(edac_device_alloc_index);
40018
40019diff --git a/drivers/edac/edac_mc_sysfs.c b/drivers/edac/edac_mc_sysfs.c
40020index c84eecb..4d7381d 100644
40021--- a/drivers/edac/edac_mc_sysfs.c
40022+++ b/drivers/edac/edac_mc_sysfs.c
40023@@ -154,7 +154,7 @@ static const char * const edac_caps[] = {
40024 struct dev_ch_attribute {
40025 struct device_attribute attr;
40026 int channel;
40027-};
40028+} __do_const;
40029
40030 #define DEVICE_CHANNEL(_name, _mode, _show, _store, _var) \
40031 static struct dev_ch_attribute dev_attr_legacy_##_name = \
40032@@ -1009,15 +1009,17 @@ int edac_create_sysfs_mci_device(struct mem_ctl_info *mci)
40033 }
40034
40035 if (mci->set_sdram_scrub_rate || mci->get_sdram_scrub_rate) {
40036+ pax_open_kernel();
40037 if (mci->get_sdram_scrub_rate) {
40038- dev_attr_sdram_scrub_rate.attr.mode |= S_IRUGO;
40039- dev_attr_sdram_scrub_rate.show = &mci_sdram_scrub_rate_show;
40040+ *(umode_t *)&dev_attr_sdram_scrub_rate.attr.mode |= S_IRUGO;
40041+ *(void **)&dev_attr_sdram_scrub_rate.show = &mci_sdram_scrub_rate_show;
40042 }
40043
40044 if (mci->set_sdram_scrub_rate) {
40045- dev_attr_sdram_scrub_rate.attr.mode |= S_IWUSR;
40046- dev_attr_sdram_scrub_rate.store = &mci_sdram_scrub_rate_store;
40047+ *(umode_t *)&dev_attr_sdram_scrub_rate.attr.mode |= S_IWUSR;
40048+ *(void **)&dev_attr_sdram_scrub_rate.store = &mci_sdram_scrub_rate_store;
40049 }
40050+ pax_close_kernel();
40051
40052 err = device_create_file(&mci->dev, &dev_attr_sdram_scrub_rate);
40053 if (err) {
40054diff --git a/drivers/edac/edac_pci.c b/drivers/edac/edac_pci.c
40055index 2cf44b4d..6dd2dc7 100644
40056--- a/drivers/edac/edac_pci.c
40057+++ b/drivers/edac/edac_pci.c
40058@@ -29,7 +29,7 @@
40059
40060 static DEFINE_MUTEX(edac_pci_ctls_mutex);
40061 static LIST_HEAD(edac_pci_list);
40062-static atomic_t pci_indexes = ATOMIC_INIT(0);
40063+static atomic_unchecked_t pci_indexes = ATOMIC_INIT(0);
40064
40065 /*
40066 * edac_pci_alloc_ctl_info
40067@@ -315,7 +315,7 @@ EXPORT_SYMBOL_GPL(edac_pci_reset_delay_period);
40068 */
40069 int edac_pci_alloc_index(void)
40070 {
40071- return atomic_inc_return(&pci_indexes) - 1;
40072+ return atomic_inc_return_unchecked(&pci_indexes) - 1;
40073 }
40074 EXPORT_SYMBOL_GPL(edac_pci_alloc_index);
40075
40076diff --git a/drivers/edac/edac_pci_sysfs.c b/drivers/edac/edac_pci_sysfs.c
40077index 24d877f..4e30133 100644
40078--- a/drivers/edac/edac_pci_sysfs.c
40079+++ b/drivers/edac/edac_pci_sysfs.c
40080@@ -23,8 +23,8 @@ static int edac_pci_log_pe = 1; /* log PCI parity errors */
40081 static int edac_pci_log_npe = 1; /* log PCI non-parity error errors */
40082 static int edac_pci_poll_msec = 1000; /* one second workq period */
40083
40084-static atomic_t pci_parity_count = ATOMIC_INIT(0);
40085-static atomic_t pci_nonparity_count = ATOMIC_INIT(0);
40086+static atomic_unchecked_t pci_parity_count = ATOMIC_INIT(0);
40087+static atomic_unchecked_t pci_nonparity_count = ATOMIC_INIT(0);
40088
40089 static struct kobject *edac_pci_top_main_kobj;
40090 static atomic_t edac_pci_sysfs_refcount = ATOMIC_INIT(0);
40091@@ -232,7 +232,7 @@ struct edac_pci_dev_attribute {
40092 void *value;
40093 ssize_t(*show) (void *, char *);
40094 ssize_t(*store) (void *, const char *, size_t);
40095-};
40096+} __do_const;
40097
40098 /* Set of show/store abstract level functions for PCI Parity object */
40099 static ssize_t edac_pci_dev_show(struct kobject *kobj, struct attribute *attr,
40100@@ -576,7 +576,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
40101 edac_printk(KERN_CRIT, EDAC_PCI,
40102 "Signaled System Error on %s\n",
40103 pci_name(dev));
40104- atomic_inc(&pci_nonparity_count);
40105+ atomic_inc_unchecked(&pci_nonparity_count);
40106 }
40107
40108 if (status & (PCI_STATUS_PARITY)) {
40109@@ -584,7 +584,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
40110 "Master Data Parity Error on %s\n",
40111 pci_name(dev));
40112
40113- atomic_inc(&pci_parity_count);
40114+ atomic_inc_unchecked(&pci_parity_count);
40115 }
40116
40117 if (status & (PCI_STATUS_DETECTED_PARITY)) {
40118@@ -592,7 +592,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
40119 "Detected Parity Error on %s\n",
40120 pci_name(dev));
40121
40122- atomic_inc(&pci_parity_count);
40123+ atomic_inc_unchecked(&pci_parity_count);
40124 }
40125 }
40126
40127@@ -615,7 +615,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
40128 edac_printk(KERN_CRIT, EDAC_PCI, "Bridge "
40129 "Signaled System Error on %s\n",
40130 pci_name(dev));
40131- atomic_inc(&pci_nonparity_count);
40132+ atomic_inc_unchecked(&pci_nonparity_count);
40133 }
40134
40135 if (status & (PCI_STATUS_PARITY)) {
40136@@ -623,7 +623,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
40137 "Master Data Parity Error on "
40138 "%s\n", pci_name(dev));
40139
40140- atomic_inc(&pci_parity_count);
40141+ atomic_inc_unchecked(&pci_parity_count);
40142 }
40143
40144 if (status & (PCI_STATUS_DETECTED_PARITY)) {
40145@@ -631,7 +631,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
40146 "Detected Parity Error on %s\n",
40147 pci_name(dev));
40148
40149- atomic_inc(&pci_parity_count);
40150+ atomic_inc_unchecked(&pci_parity_count);
40151 }
40152 }
40153 }
40154@@ -669,7 +669,7 @@ void edac_pci_do_parity_check(void)
40155 if (!check_pci_errors)
40156 return;
40157
40158- before_count = atomic_read(&pci_parity_count);
40159+ before_count = atomic_read_unchecked(&pci_parity_count);
40160
40161 /* scan all PCI devices looking for a Parity Error on devices and
40162 * bridges.
40163@@ -681,7 +681,7 @@ void edac_pci_do_parity_check(void)
40164 /* Only if operator has selected panic on PCI Error */
40165 if (edac_pci_get_panic_on_pe()) {
40166 /* If the count is different 'after' from 'before' */
40167- if (before_count != atomic_read(&pci_parity_count))
40168+ if (before_count != atomic_read_unchecked(&pci_parity_count))
40169 panic("EDAC: PCI Parity Error");
40170 }
40171 }
40172diff --git a/drivers/edac/mce_amd.h b/drivers/edac/mce_amd.h
40173index c2359a1..8bd119d 100644
40174--- a/drivers/edac/mce_amd.h
40175+++ b/drivers/edac/mce_amd.h
40176@@ -74,7 +74,7 @@ struct amd_decoder_ops {
40177 bool (*mc0_mce)(u16, u8);
40178 bool (*mc1_mce)(u16, u8);
40179 bool (*mc2_mce)(u16, u8);
40180-};
40181+} __no_const;
40182
40183 void amd_report_gart_errors(bool);
40184 void amd_register_ecc_decoder(void (*f)(int, struct mce *));
40185diff --git a/drivers/firewire/core-card.c b/drivers/firewire/core-card.c
40186index 57ea7f4..af06b76 100644
40187--- a/drivers/firewire/core-card.c
40188+++ b/drivers/firewire/core-card.c
40189@@ -528,9 +528,9 @@ void fw_card_initialize(struct fw_card *card,
40190 const struct fw_card_driver *driver,
40191 struct device *device)
40192 {
40193- static atomic_t index = ATOMIC_INIT(-1);
40194+ static atomic_unchecked_t index = ATOMIC_INIT(-1);
40195
40196- card->index = atomic_inc_return(&index);
40197+ card->index = atomic_inc_return_unchecked(&index);
40198 card->driver = driver;
40199 card->device = device;
40200 card->current_tlabel = 0;
40201@@ -680,7 +680,7 @@ EXPORT_SYMBOL_GPL(fw_card_release);
40202
40203 void fw_core_remove_card(struct fw_card *card)
40204 {
40205- struct fw_card_driver dummy_driver = dummy_driver_template;
40206+ fw_card_driver_no_const dummy_driver = dummy_driver_template;
40207
40208 card->driver->update_phy_reg(card, 4,
40209 PHY_LINK_ACTIVE | PHY_CONTENDER, 0);
40210diff --git a/drivers/firewire/core-device.c b/drivers/firewire/core-device.c
40211index f9e3aee..269dbdb 100644
40212--- a/drivers/firewire/core-device.c
40213+++ b/drivers/firewire/core-device.c
40214@@ -256,7 +256,7 @@ EXPORT_SYMBOL(fw_device_enable_phys_dma);
40215 struct config_rom_attribute {
40216 struct device_attribute attr;
40217 u32 key;
40218-};
40219+} __do_const;
40220
40221 static ssize_t show_immediate(struct device *dev,
40222 struct device_attribute *dattr, char *buf)
40223diff --git a/drivers/firewire/core-transaction.c b/drivers/firewire/core-transaction.c
40224index d6a09b9..18e90dd 100644
40225--- a/drivers/firewire/core-transaction.c
40226+++ b/drivers/firewire/core-transaction.c
40227@@ -38,6 +38,7 @@
40228 #include <linux/timer.h>
40229 #include <linux/types.h>
40230 #include <linux/workqueue.h>
40231+#include <linux/sched.h>
40232
40233 #include <asm/byteorder.h>
40234
40235diff --git a/drivers/firewire/core.h b/drivers/firewire/core.h
40236index e1480ff6..1a429bd 100644
40237--- a/drivers/firewire/core.h
40238+++ b/drivers/firewire/core.h
40239@@ -111,6 +111,7 @@ struct fw_card_driver {
40240
40241 int (*stop_iso)(struct fw_iso_context *ctx);
40242 };
40243+typedef struct fw_card_driver __no_const fw_card_driver_no_const;
40244
40245 void fw_card_initialize(struct fw_card *card,
40246 const struct fw_card_driver *driver, struct device *device);
40247diff --git a/drivers/firewire/ohci.c b/drivers/firewire/ohci.c
40248index f51d376..b118e40 100644
40249--- a/drivers/firewire/ohci.c
40250+++ b/drivers/firewire/ohci.c
40251@@ -2049,10 +2049,12 @@ static void bus_reset_work(struct work_struct *work)
40252 be32_to_cpu(ohci->next_header));
40253 }
40254
40255+#ifndef CONFIG_GRKERNSEC
40256 if (param_remote_dma) {
40257 reg_write(ohci, OHCI1394_PhyReqFilterHiSet, ~0);
40258 reg_write(ohci, OHCI1394_PhyReqFilterLoSet, ~0);
40259 }
40260+#endif
40261
40262 spin_unlock_irq(&ohci->lock);
40263
40264@@ -2584,8 +2586,10 @@ static int ohci_enable_phys_dma(struct fw_card *card,
40265 unsigned long flags;
40266 int n, ret = 0;
40267
40268+#ifndef CONFIG_GRKERNSEC
40269 if (param_remote_dma)
40270 return 0;
40271+#endif
40272
40273 /*
40274 * FIXME: Make sure this bitmask is cleared when we clear the busReset
40275diff --git a/drivers/firmware/dmi-id.c b/drivers/firmware/dmi-id.c
40276index 94a58a0..f5eba42 100644
40277--- a/drivers/firmware/dmi-id.c
40278+++ b/drivers/firmware/dmi-id.c
40279@@ -16,7 +16,7 @@
40280 struct dmi_device_attribute{
40281 struct device_attribute dev_attr;
40282 int field;
40283-};
40284+} __do_const;
40285 #define to_dmi_dev_attr(_dev_attr) \
40286 container_of(_dev_attr, struct dmi_device_attribute, dev_attr)
40287
40288diff --git a/drivers/firmware/dmi_scan.c b/drivers/firmware/dmi_scan.c
40289index 2eebd28b..4261350 100644
40290--- a/drivers/firmware/dmi_scan.c
40291+++ b/drivers/firmware/dmi_scan.c
40292@@ -893,7 +893,7 @@ int dmi_walk(void (*decode)(const struct dmi_header *, void *),
40293 if (buf == NULL)
40294 return -1;
40295
40296- dmi_table(buf, dmi_len, dmi_num, decode, private_data);
40297+ dmi_table((char __force_kernel *)buf, dmi_len, dmi_num, decode, private_data);
40298
40299 dmi_unmap(buf);
40300 return 0;
40301diff --git a/drivers/firmware/efi/cper.c b/drivers/firmware/efi/cper.c
40302index 4fd9961..52d60ce 100644
40303--- a/drivers/firmware/efi/cper.c
40304+++ b/drivers/firmware/efi/cper.c
40305@@ -44,12 +44,12 @@ static char rcd_decode_str[CPER_REC_LEN];
40306 */
40307 u64 cper_next_record_id(void)
40308 {
40309- static atomic64_t seq;
40310+ static atomic64_unchecked_t seq;
40311
40312- if (!atomic64_read(&seq))
40313- atomic64_set(&seq, ((u64)get_seconds()) << 32);
40314+ if (!atomic64_read_unchecked(&seq))
40315+ atomic64_set_unchecked(&seq, ((u64)get_seconds()) << 32);
40316
40317- return atomic64_inc_return(&seq);
40318+ return atomic64_inc_return_unchecked(&seq);
40319 }
40320 EXPORT_SYMBOL_GPL(cper_next_record_id);
40321
40322diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c
40323index 3061bb8..92b5fcc 100644
40324--- a/drivers/firmware/efi/efi.c
40325+++ b/drivers/firmware/efi/efi.c
40326@@ -160,14 +160,16 @@ static struct attribute_group efi_subsys_attr_group = {
40327 };
40328
40329 static struct efivars generic_efivars;
40330-static struct efivar_operations generic_ops;
40331+static efivar_operations_no_const generic_ops __read_only;
40332
40333 static int generic_ops_register(void)
40334 {
40335- generic_ops.get_variable = efi.get_variable;
40336- generic_ops.set_variable = efi.set_variable;
40337- generic_ops.get_next_variable = efi.get_next_variable;
40338- generic_ops.query_variable_store = efi_query_variable_store;
40339+ pax_open_kernel();
40340+ *(void **)&generic_ops.get_variable = efi.get_variable;
40341+ *(void **)&generic_ops.set_variable = efi.set_variable;
40342+ *(void **)&generic_ops.get_next_variable = efi.get_next_variable;
40343+ *(void **)&generic_ops.query_variable_store = efi_query_variable_store;
40344+ pax_close_kernel();
40345
40346 return efivars_register(&generic_efivars, &generic_ops, efi_kobj);
40347 }
40348diff --git a/drivers/firmware/efi/efivars.c b/drivers/firmware/efi/efivars.c
40349index 7b2e049..a253334 100644
40350--- a/drivers/firmware/efi/efivars.c
40351+++ b/drivers/firmware/efi/efivars.c
40352@@ -589,7 +589,7 @@ efivar_create_sysfs_entry(struct efivar_entry *new_var)
40353 static int
40354 create_efivars_bin_attributes(void)
40355 {
40356- struct bin_attribute *attr;
40357+ bin_attribute_no_const *attr;
40358 int error;
40359
40360 /* new_var */
40361diff --git a/drivers/firmware/efi/runtime-map.c b/drivers/firmware/efi/runtime-map.c
40362index 87b8e3b..c4afb35 100644
40363--- a/drivers/firmware/efi/runtime-map.c
40364+++ b/drivers/firmware/efi/runtime-map.c
40365@@ -97,7 +97,7 @@ static void map_release(struct kobject *kobj)
40366 kfree(entry);
40367 }
40368
40369-static struct kobj_type __refdata map_ktype = {
40370+static const struct kobj_type __refconst map_ktype = {
40371 .sysfs_ops = &map_attr_ops,
40372 .default_attrs = def_attrs,
40373 .release = map_release,
40374diff --git a/drivers/firmware/google/gsmi.c b/drivers/firmware/google/gsmi.c
40375index f1ab05e..ab51228 100644
40376--- a/drivers/firmware/google/gsmi.c
40377+++ b/drivers/firmware/google/gsmi.c
40378@@ -709,7 +709,7 @@ static u32 __init hash_oem_table_id(char s[8])
40379 return local_hash_64(input, 32);
40380 }
40381
40382-static struct dmi_system_id gsmi_dmi_table[] __initdata = {
40383+static const struct dmi_system_id gsmi_dmi_table[] __initconst = {
40384 {
40385 .ident = "Google Board",
40386 .matches = {
40387diff --git a/drivers/firmware/google/memconsole.c b/drivers/firmware/google/memconsole.c
40388index 2f569aa..26e4f39 100644
40389--- a/drivers/firmware/google/memconsole.c
40390+++ b/drivers/firmware/google/memconsole.c
40391@@ -136,7 +136,7 @@ static bool __init found_memconsole(void)
40392 return false;
40393 }
40394
40395-static struct dmi_system_id memconsole_dmi_table[] __initdata = {
40396+static const struct dmi_system_id memconsole_dmi_table[] __initconst = {
40397 {
40398 .ident = "Google Board",
40399 .matches = {
40400@@ -155,7 +155,10 @@ static int __init memconsole_init(void)
40401 if (!found_memconsole())
40402 return -ENODEV;
40403
40404- memconsole_bin_attr.size = memconsole_length;
40405+ pax_open_kernel();
40406+ *(size_t *)&memconsole_bin_attr.size = memconsole_length;
40407+ pax_close_kernel();
40408+
40409 return sysfs_create_bin_file(firmware_kobj, &memconsole_bin_attr);
40410 }
40411
40412diff --git a/drivers/firmware/memmap.c b/drivers/firmware/memmap.c
40413index cc016c61..d35279e 100644
40414--- a/drivers/firmware/memmap.c
40415+++ b/drivers/firmware/memmap.c
40416@@ -124,7 +124,7 @@ static void __meminit release_firmware_map_entry(struct kobject *kobj)
40417 kfree(entry);
40418 }
40419
40420-static struct kobj_type __refdata memmap_ktype = {
40421+static const struct kobj_type __refconst memmap_ktype = {
40422 .release = release_firmware_map_entry,
40423 .sysfs_ops = &memmap_attr_ops,
40424 .default_attrs = def_attrs,
40425diff --git a/drivers/gpio/gpio-em.c b/drivers/gpio/gpio-em.c
40426index 3cfcfc6..09d6f117 100644
40427--- a/drivers/gpio/gpio-em.c
40428+++ b/drivers/gpio/gpio-em.c
40429@@ -278,7 +278,7 @@ static int em_gio_probe(struct platform_device *pdev)
40430 struct em_gio_priv *p;
40431 struct resource *io[2], *irq[2];
40432 struct gpio_chip *gpio_chip;
40433- struct irq_chip *irq_chip;
40434+ irq_chip_no_const *irq_chip;
40435 const char *name = dev_name(&pdev->dev);
40436 int ret;
40437
40438diff --git a/drivers/gpio/gpio-ich.c b/drivers/gpio/gpio-ich.c
40439index 7818cd1..1be40e5 100644
40440--- a/drivers/gpio/gpio-ich.c
40441+++ b/drivers/gpio/gpio-ich.c
40442@@ -94,7 +94,7 @@ struct ichx_desc {
40443 * this option allows driver caching written output values
40444 */
40445 bool use_outlvl_cache;
40446-};
40447+} __do_const;
40448
40449 static struct {
40450 spinlock_t lock;
40451diff --git a/drivers/gpio/gpio-omap.c b/drivers/gpio/gpio-omap.c
40452index f476ae2..05e1bdd 100644
40453--- a/drivers/gpio/gpio-omap.c
40454+++ b/drivers/gpio/gpio-omap.c
40455@@ -1188,7 +1188,7 @@ static int omap_gpio_probe(struct platform_device *pdev)
40456 const struct omap_gpio_platform_data *pdata;
40457 struct resource *res;
40458 struct gpio_bank *bank;
40459- struct irq_chip *irqc;
40460+ irq_chip_no_const *irqc;
40461 int ret;
40462
40463 match = of_match_device(of_match_ptr(omap_gpio_match), dev);
40464diff --git a/drivers/gpio/gpio-rcar.c b/drivers/gpio/gpio-rcar.c
40465index c49522e..9a7ee54 100644
40466--- a/drivers/gpio/gpio-rcar.c
40467+++ b/drivers/gpio/gpio-rcar.c
40468@@ -348,7 +348,7 @@ static int gpio_rcar_probe(struct platform_device *pdev)
40469 struct gpio_rcar_priv *p;
40470 struct resource *io, *irq;
40471 struct gpio_chip *gpio_chip;
40472- struct irq_chip *irq_chip;
40473+ irq_chip_no_const *irq_chip;
40474 struct device *dev = &pdev->dev;
40475 const char *name = dev_name(dev);
40476 int ret;
40477diff --git a/drivers/gpio/gpio-vr41xx.c b/drivers/gpio/gpio-vr41xx.c
40478index c1caa45..f0f97d2 100644
40479--- a/drivers/gpio/gpio-vr41xx.c
40480+++ b/drivers/gpio/gpio-vr41xx.c
40481@@ -224,7 +224,7 @@ static int giu_get_irq(unsigned int irq)
40482 printk(KERN_ERR "spurious GIU interrupt: %04x(%04x),%04x(%04x)\n",
40483 maskl, pendl, maskh, pendh);
40484
40485- atomic_inc(&irq_err_count);
40486+ atomic_inc_unchecked(&irq_err_count);
40487
40488 return -EINVAL;
40489 }
40490diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
40491index 1ca9295..9f3d481 100644
40492--- a/drivers/gpio/gpiolib.c
40493+++ b/drivers/gpio/gpiolib.c
40494@@ -554,8 +554,10 @@ static void gpiochip_irqchip_remove(struct gpio_chip *gpiochip)
40495 }
40496
40497 if (gpiochip->irqchip) {
40498- gpiochip->irqchip->irq_request_resources = NULL;
40499- gpiochip->irqchip->irq_release_resources = NULL;
40500+ pax_open_kernel();
40501+ *(void **)&gpiochip->irqchip->irq_request_resources = NULL;
40502+ *(void **)&gpiochip->irqchip->irq_release_resources = NULL;
40503+ pax_close_kernel();
40504 gpiochip->irqchip = NULL;
40505 }
40506 }
40507@@ -621,8 +623,11 @@ int gpiochip_irqchip_add(struct gpio_chip *gpiochip,
40508 gpiochip->irqchip = NULL;
40509 return -EINVAL;
40510 }
40511- irqchip->irq_request_resources = gpiochip_irq_reqres;
40512- irqchip->irq_release_resources = gpiochip_irq_relres;
40513+
40514+ pax_open_kernel();
40515+ *(void **)&irqchip->irq_request_resources = gpiochip_irq_reqres;
40516+ *(void **)&irqchip->irq_release_resources = gpiochip_irq_relres;
40517+ pax_close_kernel();
40518
40519 /*
40520 * Prepare the mapping since the irqchip shall be orthogonal to
40521diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
40522index 488f51d..301d462 100644
40523--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
40524+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
40525@@ -118,7 +118,7 @@ struct device_queue_manager_ops {
40526 enum cache_policy alternate_policy,
40527 void __user *alternate_aperture_base,
40528 uint64_t alternate_aperture_size);
40529-};
40530+} __no_const;
40531
40532 /**
40533 * struct device_queue_manager
40534diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.h b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.h
40535index 5940531..a75b0e5 100644
40536--- a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.h
40537+++ b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.h
40538@@ -62,7 +62,7 @@ struct kernel_queue_ops {
40539
40540 void (*submit_packet)(struct kernel_queue *kq);
40541 void (*rollback_packet)(struct kernel_queue *kq);
40542-};
40543+} __no_const;
40544
40545 struct kernel_queue {
40546 struct kernel_queue_ops ops;
40547diff --git a/drivers/gpu/drm/drm_context.c b/drivers/gpu/drm/drm_context.c
40548index 9b23525..65f4110 100644
40549--- a/drivers/gpu/drm/drm_context.c
40550+++ b/drivers/gpu/drm/drm_context.c
40551@@ -53,6 +53,9 @@ struct drm_ctx_list {
40552 */
40553 void drm_legacy_ctxbitmap_free(struct drm_device * dev, int ctx_handle)
40554 {
40555+ if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT))
40556+ return;
40557+
40558 mutex_lock(&dev->struct_mutex);
40559 idr_remove(&dev->ctx_idr, ctx_handle);
40560 mutex_unlock(&dev->struct_mutex);
40561@@ -87,6 +90,9 @@ static int drm_legacy_ctxbitmap_next(struct drm_device * dev)
40562 */
40563 int drm_legacy_ctxbitmap_init(struct drm_device * dev)
40564 {
40565+ if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT))
40566+ return -EINVAL;
40567+
40568 idr_init(&dev->ctx_idr);
40569 return 0;
40570 }
40571@@ -101,6 +107,9 @@ int drm_legacy_ctxbitmap_init(struct drm_device * dev)
40572 */
40573 void drm_legacy_ctxbitmap_cleanup(struct drm_device * dev)
40574 {
40575+ if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT))
40576+ return;
40577+
40578 mutex_lock(&dev->struct_mutex);
40579 idr_destroy(&dev->ctx_idr);
40580 mutex_unlock(&dev->struct_mutex);
40581@@ -119,11 +128,14 @@ void drm_legacy_ctxbitmap_flush(struct drm_device *dev, struct drm_file *file)
40582 {
40583 struct drm_ctx_list *pos, *tmp;
40584
40585+ if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT))
40586+ return;
40587+
40588 mutex_lock(&dev->ctxlist_mutex);
40589
40590 list_for_each_entry_safe(pos, tmp, &dev->ctxlist, head) {
40591 if (pos->tag == file &&
40592- pos->handle != DRM_KERNEL_CONTEXT) {
40593+ _DRM_LOCKING_CONTEXT(pos->handle) != DRM_KERNEL_CONTEXT) {
40594 if (dev->driver->context_dtor)
40595 dev->driver->context_dtor(dev, pos->handle);
40596
40597@@ -161,6 +173,9 @@ int drm_legacy_getsareactx(struct drm_device *dev, void *data,
40598 struct drm_local_map *map;
40599 struct drm_map_list *_entry;
40600
40601+ if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT))
40602+ return -EINVAL;
40603+
40604 mutex_lock(&dev->struct_mutex);
40605
40606 map = idr_find(&dev->ctx_idr, request->ctx_id);
40607@@ -205,6 +220,9 @@ int drm_legacy_setsareactx(struct drm_device *dev, void *data,
40608 struct drm_local_map *map = NULL;
40609 struct drm_map_list *r_list = NULL;
40610
40611+ if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT))
40612+ return -EINVAL;
40613+
40614 mutex_lock(&dev->struct_mutex);
40615 list_for_each_entry(r_list, &dev->maplist, head) {
40616 if (r_list->map
40617@@ -277,7 +295,13 @@ static int drm_context_switch_complete(struct drm_device *dev,
40618 {
40619 dev->last_context = new; /* PRE/POST: This is the _only_ writer. */
40620
40621- if (!_DRM_LOCK_IS_HELD(file_priv->master->lock.hw_lock->lock)) {
40622+ if (file_priv->master->lock.hw_lock == NULL) {
40623+ DRM_ERROR(
40624+ "Device has been unregistered. Hard exit. Process %d\n",
40625+ task_pid_nr(current));
40626+ send_sig(SIGTERM, current, 0);
40627+ return -EPERM;
40628+ } else if (!_DRM_LOCK_IS_HELD(file_priv->master->lock.hw_lock->lock)) {
40629 DRM_ERROR("Lock isn't held after context switch\n");
40630 }
40631
40632@@ -305,6 +329,9 @@ int drm_legacy_resctx(struct drm_device *dev, void *data,
40633 struct drm_ctx ctx;
40634 int i;
40635
40636+ if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT))
40637+ return -EINVAL;
40638+
40639 if (res->count >= DRM_RESERVED_CONTEXTS) {
40640 memset(&ctx, 0, sizeof(ctx));
40641 for (i = 0; i < DRM_RESERVED_CONTEXTS; i++) {
40642@@ -335,8 +362,11 @@ int drm_legacy_addctx(struct drm_device *dev, void *data,
40643 struct drm_ctx_list *ctx_entry;
40644 struct drm_ctx *ctx = data;
40645
40646+ if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT))
40647+ return -EINVAL;
40648+
40649 ctx->handle = drm_legacy_ctxbitmap_next(dev);
40650- if (ctx->handle == DRM_KERNEL_CONTEXT) {
40651+ if (_DRM_LOCKING_CONTEXT(ctx->handle) == DRM_KERNEL_CONTEXT) {
40652 /* Skip kernel's context and get a new one. */
40653 ctx->handle = drm_legacy_ctxbitmap_next(dev);
40654 }
40655@@ -378,6 +408,9 @@ int drm_legacy_getctx(struct drm_device *dev, void *data,
40656 {
40657 struct drm_ctx *ctx = data;
40658
40659+ if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT))
40660+ return -EINVAL;
40661+
40662 /* This is 0, because we don't handle any context flags */
40663 ctx->flags = 0;
40664
40665@@ -400,6 +433,9 @@ int drm_legacy_switchctx(struct drm_device *dev, void *data,
40666 {
40667 struct drm_ctx *ctx = data;
40668
40669+ if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT))
40670+ return -EINVAL;
40671+
40672 DRM_DEBUG("%d\n", ctx->handle);
40673 return drm_context_switch(dev, dev->last_context, ctx->handle);
40674 }
40675@@ -420,6 +456,9 @@ int drm_legacy_newctx(struct drm_device *dev, void *data,
40676 {
40677 struct drm_ctx *ctx = data;
40678
40679+ if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT))
40680+ return -EINVAL;
40681+
40682 DRM_DEBUG("%d\n", ctx->handle);
40683 drm_context_switch_complete(dev, file_priv, ctx->handle);
40684
40685@@ -442,8 +481,11 @@ int drm_legacy_rmctx(struct drm_device *dev, void *data,
40686 {
40687 struct drm_ctx *ctx = data;
40688
40689+ if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT))
40690+ return -EINVAL;
40691+
40692 DRM_DEBUG("%d\n", ctx->handle);
40693- if (ctx->handle != DRM_KERNEL_CONTEXT) {
40694+ if (_DRM_LOCKING_CONTEXT(ctx->handle) != DRM_KERNEL_CONTEXT) {
40695 if (dev->driver->context_dtor)
40696 dev->driver->context_dtor(dev, ctx->handle);
40697 drm_legacy_ctxbitmap_free(dev, ctx->handle);
40698diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
40699index b6f076b..2918de2 100644
40700--- a/drivers/gpu/drm/drm_crtc.c
40701+++ b/drivers/gpu/drm/drm_crtc.c
40702@@ -4118,7 +4118,7 @@ int drm_mode_getproperty_ioctl(struct drm_device *dev,
40703 goto done;
40704 }
40705
40706- if (copy_to_user(&enum_ptr[copied].name,
40707+ if (copy_to_user(enum_ptr[copied].name,
40708 &prop_enum->name, DRM_PROP_NAME_LEN)) {
40709 ret = -EFAULT;
40710 goto done;
40711diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
40712index d512134..a80a8e4 100644
40713--- a/drivers/gpu/drm/drm_drv.c
40714+++ b/drivers/gpu/drm/drm_drv.c
40715@@ -448,7 +448,7 @@ void drm_unplug_dev(struct drm_device *dev)
40716
40717 drm_device_set_unplugged(dev);
40718
40719- if (dev->open_count == 0) {
40720+ if (local_read(&dev->open_count) == 0) {
40721 drm_put_dev(dev);
40722 }
40723 mutex_unlock(&drm_global_mutex);
40724@@ -596,10 +596,13 @@ struct drm_device *drm_dev_alloc(struct drm_driver *driver,
40725 if (drm_ht_create(&dev->map_hash, 12))
40726 goto err_minors;
40727
40728- ret = drm_legacy_ctxbitmap_init(dev);
40729- if (ret) {
40730- DRM_ERROR("Cannot allocate memory for context bitmap.\n");
40731- goto err_ht;
40732+ if (drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT)) {
40733+ ret = drm_legacy_ctxbitmap_init(dev);
40734+ if (ret) {
40735+ DRM_ERROR(
40736+ "Cannot allocate memory for context bitmap.\n");
40737+ goto err_ht;
40738+ }
40739 }
40740
40741 if (drm_core_check_feature(dev, DRIVER_GEM)) {
40742diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c
40743index 076dd60..e4a4ba7 100644
40744--- a/drivers/gpu/drm/drm_fops.c
40745+++ b/drivers/gpu/drm/drm_fops.c
40746@@ -89,7 +89,7 @@ int drm_open(struct inode *inode, struct file *filp)
40747 return PTR_ERR(minor);
40748
40749 dev = minor->dev;
40750- if (!dev->open_count++)
40751+ if (local_inc_return(&dev->open_count) == 1)
40752 need_setup = 1;
40753
40754 /* share address_space across all char-devs of a single device */
40755@@ -106,7 +106,7 @@ int drm_open(struct inode *inode, struct file *filp)
40756 return 0;
40757
40758 err_undo:
40759- dev->open_count--;
40760+ local_dec(&dev->open_count);
40761 drm_minor_release(minor);
40762 return retcode;
40763 }
40764@@ -376,7 +376,7 @@ int drm_release(struct inode *inode, struct file *filp)
40765
40766 mutex_lock(&drm_global_mutex);
40767
40768- DRM_DEBUG("open_count = %d\n", dev->open_count);
40769+ DRM_DEBUG("open_count = %ld\n", local_read(&dev->open_count));
40770
40771 mutex_lock(&dev->struct_mutex);
40772 list_del(&file_priv->lhead);
40773@@ -389,10 +389,10 @@ int drm_release(struct inode *inode, struct file *filp)
40774 * Begin inline drm_release
40775 */
40776
40777- DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %d\n",
40778+ DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %ld\n",
40779 task_pid_nr(current),
40780 (long)old_encode_dev(file_priv->minor->kdev->devt),
40781- dev->open_count);
40782+ local_read(&dev->open_count));
40783
40784 /* Release any auth tokens that might point to this file_priv,
40785 (do that under the drm_global_mutex) */
40786@@ -465,7 +465,7 @@ int drm_release(struct inode *inode, struct file *filp)
40787 * End inline drm_release
40788 */
40789
40790- if (!--dev->open_count) {
40791+ if (local_dec_and_test(&dev->open_count)) {
40792 retcode = drm_lastclose(dev);
40793 if (drm_device_is_unplugged(dev))
40794 drm_put_dev(dev);
40795diff --git a/drivers/gpu/drm/drm_global.c b/drivers/gpu/drm/drm_global.c
40796index 3d2e91c..d31c4c9 100644
40797--- a/drivers/gpu/drm/drm_global.c
40798+++ b/drivers/gpu/drm/drm_global.c
40799@@ -36,7 +36,7 @@
40800 struct drm_global_item {
40801 struct mutex mutex;
40802 void *object;
40803- int refcount;
40804+ atomic_t refcount;
40805 };
40806
40807 static struct drm_global_item glob[DRM_GLOBAL_NUM];
40808@@ -49,7 +49,7 @@ void drm_global_init(void)
40809 struct drm_global_item *item = &glob[i];
40810 mutex_init(&item->mutex);
40811 item->object = NULL;
40812- item->refcount = 0;
40813+ atomic_set(&item->refcount, 0);
40814 }
40815 }
40816
40817@@ -59,7 +59,7 @@ void drm_global_release(void)
40818 for (i = 0; i < DRM_GLOBAL_NUM; ++i) {
40819 struct drm_global_item *item = &glob[i];
40820 BUG_ON(item->object != NULL);
40821- BUG_ON(item->refcount != 0);
40822+ BUG_ON(atomic_read(&item->refcount) != 0);
40823 }
40824 }
40825
40826@@ -69,7 +69,7 @@ int drm_global_item_ref(struct drm_global_reference *ref)
40827 struct drm_global_item *item = &glob[ref->global_type];
40828
40829 mutex_lock(&item->mutex);
40830- if (item->refcount == 0) {
40831+ if (atomic_read(&item->refcount) == 0) {
40832 item->object = kzalloc(ref->size, GFP_KERNEL);
40833 if (unlikely(item->object == NULL)) {
40834 ret = -ENOMEM;
40835@@ -82,7 +82,7 @@ int drm_global_item_ref(struct drm_global_reference *ref)
40836 goto out_err;
40837
40838 }
40839- ++item->refcount;
40840+ atomic_inc(&item->refcount);
40841 ref->object = item->object;
40842 mutex_unlock(&item->mutex);
40843 return 0;
40844@@ -98,9 +98,9 @@ void drm_global_item_unref(struct drm_global_reference *ref)
40845 struct drm_global_item *item = &glob[ref->global_type];
40846
40847 mutex_lock(&item->mutex);
40848- BUG_ON(item->refcount == 0);
40849+ BUG_ON(atomic_read(&item->refcount) == 0);
40850 BUG_ON(ref->object != item->object);
40851- if (--item->refcount == 0) {
40852+ if (atomic_dec_and_test(&item->refcount)) {
40853 ref->release(ref);
40854 item->object = NULL;
40855 }
40856diff --git a/drivers/gpu/drm/drm_info.c b/drivers/gpu/drm/drm_info.c
40857index f1b32f9..394f791 100644
40858--- a/drivers/gpu/drm/drm_info.c
40859+++ b/drivers/gpu/drm/drm_info.c
40860@@ -76,10 +76,13 @@ int drm_vm_info(struct seq_file *m, void *data)
40861 struct drm_local_map *map;
40862 struct drm_map_list *r_list;
40863
40864- /* Hardcoded from _DRM_FRAME_BUFFER,
40865- _DRM_REGISTERS, _DRM_SHM, _DRM_AGP, and
40866- _DRM_SCATTER_GATHER and _DRM_CONSISTENT */
40867- const char *types[] = { "FB", "REG", "SHM", "AGP", "SG", "PCI" };
40868+ static const char * const types[] = {
40869+ [_DRM_FRAME_BUFFER] = "FB",
40870+ [_DRM_REGISTERS] = "REG",
40871+ [_DRM_SHM] = "SHM",
40872+ [_DRM_AGP] = "AGP",
40873+ [_DRM_SCATTER_GATHER] = "SG",
40874+ [_DRM_CONSISTENT] = "PCI"};
40875 const char *type;
40876 int i;
40877
40878@@ -90,7 +93,7 @@ int drm_vm_info(struct seq_file *m, void *data)
40879 map = r_list->map;
40880 if (!map)
40881 continue;
40882- if (map->type < 0 || map->type > 5)
40883+ if (map->type >= ARRAY_SIZE(types))
40884 type = "??";
40885 else
40886 type = types[map->type];
40887diff --git a/drivers/gpu/drm/drm_ioc32.c b/drivers/gpu/drm/drm_ioc32.c
40888index 2f4c4343..dd12cd2 100644
40889--- a/drivers/gpu/drm/drm_ioc32.c
40890+++ b/drivers/gpu/drm/drm_ioc32.c
40891@@ -457,7 +457,7 @@ static int compat_drm_infobufs(struct file *file, unsigned int cmd,
40892 request = compat_alloc_user_space(nbytes);
40893 if (!access_ok(VERIFY_WRITE, request, nbytes))
40894 return -EFAULT;
40895- list = (struct drm_buf_desc *) (request + 1);
40896+ list = (struct drm_buf_desc __user *) (request + 1);
40897
40898 if (__put_user(count, &request->count)
40899 || __put_user(list, &request->list))
40900@@ -518,7 +518,7 @@ static int compat_drm_mapbufs(struct file *file, unsigned int cmd,
40901 request = compat_alloc_user_space(nbytes);
40902 if (!access_ok(VERIFY_WRITE, request, nbytes))
40903 return -EFAULT;
40904- list = (struct drm_buf_pub *) (request + 1);
40905+ list = (struct drm_buf_pub __user *) (request + 1);
40906
40907 if (__put_user(count, &request->count)
40908 || __put_user(list, &request->list))
40909@@ -1016,7 +1016,7 @@ static int compat_drm_wait_vblank(struct file *file, unsigned int cmd,
40910 return 0;
40911 }
40912
40913-drm_ioctl_compat_t *drm_compat_ioctls[] = {
40914+drm_ioctl_compat_t drm_compat_ioctls[] = {
40915 [DRM_IOCTL_NR(DRM_IOCTL_VERSION32)] = compat_drm_version,
40916 [DRM_IOCTL_NR(DRM_IOCTL_GET_UNIQUE32)] = compat_drm_getunique,
40917 [DRM_IOCTL_NR(DRM_IOCTL_GET_MAP32)] = compat_drm_getmap,
40918@@ -1062,7 +1062,6 @@ drm_ioctl_compat_t *drm_compat_ioctls[] = {
40919 long drm_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
40920 {
40921 unsigned int nr = DRM_IOCTL_NR(cmd);
40922- drm_ioctl_compat_t *fn;
40923 int ret;
40924
40925 /* Assume that ioctls without an explicit compat routine will just
40926@@ -1072,10 +1071,8 @@ long drm_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
40927 if (nr >= ARRAY_SIZE(drm_compat_ioctls))
40928 return drm_ioctl(filp, cmd, arg);
40929
40930- fn = drm_compat_ioctls[nr];
40931-
40932- if (fn != NULL)
40933- ret = (*fn) (filp, cmd, arg);
40934+ if (drm_compat_ioctls[nr] != NULL)
40935+ ret = (*drm_compat_ioctls[nr]) (filp, cmd, arg);
40936 else
40937 ret = drm_ioctl(filp, cmd, arg);
40938
40939diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c
40940index 3785d66..1c489ef 100644
40941--- a/drivers/gpu/drm/drm_ioctl.c
40942+++ b/drivers/gpu/drm/drm_ioctl.c
40943@@ -655,7 +655,7 @@ long drm_ioctl(struct file *filp,
40944 struct drm_file *file_priv = filp->private_data;
40945 struct drm_device *dev;
40946 const struct drm_ioctl_desc *ioctl = NULL;
40947- drm_ioctl_t *func;
40948+ drm_ioctl_no_const_t func;
40949 unsigned int nr = DRM_IOCTL_NR(cmd);
40950 int retcode = -EINVAL;
40951 char stack_kdata[128];
40952diff --git a/drivers/gpu/drm/drm_lock.c b/drivers/gpu/drm/drm_lock.c
40953index f861361..b61d4c7 100644
40954--- a/drivers/gpu/drm/drm_lock.c
40955+++ b/drivers/gpu/drm/drm_lock.c
40956@@ -61,9 +61,12 @@ int drm_legacy_lock(struct drm_device *dev, void *data,
40957 struct drm_master *master = file_priv->master;
40958 int ret = 0;
40959
40960+ if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT))
40961+ return -EINVAL;
40962+
40963 ++file_priv->lock_count;
40964
40965- if (lock->context == DRM_KERNEL_CONTEXT) {
40966+ if (_DRM_LOCKING_CONTEXT(lock->context) == DRM_KERNEL_CONTEXT) {
40967 DRM_ERROR("Process %d using kernel context %d\n",
40968 task_pid_nr(current), lock->context);
40969 return -EINVAL;
40970@@ -153,12 +156,23 @@ int drm_legacy_unlock(struct drm_device *dev, void *data, struct drm_file *file_
40971 struct drm_lock *lock = data;
40972 struct drm_master *master = file_priv->master;
40973
40974- if (lock->context == DRM_KERNEL_CONTEXT) {
40975+ if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT))
40976+ return -EINVAL;
40977+
40978+ if (_DRM_LOCKING_CONTEXT(lock->context) == DRM_KERNEL_CONTEXT) {
40979 DRM_ERROR("Process %d using kernel context %d\n",
40980 task_pid_nr(current), lock->context);
40981 return -EINVAL;
40982 }
40983
40984+ if (!master->lock.hw_lock) {
40985+ DRM_ERROR(
40986+ "Device has been unregistered. Hard exit. Process %d\n",
40987+ task_pid_nr(current));
40988+ send_sig(SIGTERM, current, 0);
40989+ return -EPERM;
40990+ }
40991+
40992 if (drm_legacy_lock_free(&master->lock, lock->context)) {
40993 /* FIXME: Should really bail out here. */
40994 }
40995diff --git a/drivers/gpu/drm/gma500/mdfld_dsi_dpi.c b/drivers/gpu/drm/gma500/mdfld_dsi_dpi.c
40996index d4813e0..6c1ab4d 100644
40997--- a/drivers/gpu/drm/gma500/mdfld_dsi_dpi.c
40998+++ b/drivers/gpu/drm/gma500/mdfld_dsi_dpi.c
40999@@ -825,10 +825,16 @@ void mdfld_dsi_dpi_mode_set(struct drm_encoder *encoder,
41000 u32 pipeconf_reg = PIPEACONF;
41001 u32 dspcntr_reg = DSPACNTR;
41002
41003- u32 pipeconf = dev_priv->pipeconf[pipe];
41004- u32 dspcntr = dev_priv->dspcntr[pipe];
41005+ u32 pipeconf;
41006+ u32 dspcntr;
41007 u32 mipi = MIPI_PORT_EN | PASS_FROM_SPHY_TO_AFE | SEL_FLOPPED_HSTX;
41008
41009+ if (pipe == -1)
41010+ return;
41011+
41012+ pipeconf = dev_priv->pipeconf[pipe];
41013+ dspcntr = dev_priv->dspcntr[pipe];
41014+
41015 if (pipe) {
41016 pipeconf_reg = PIPECCONF;
41017 dspcntr_reg = DSPCCNTR;
41018diff --git a/drivers/gpu/drm/i810/i810_drv.h b/drivers/gpu/drm/i810/i810_drv.h
41019index 93ec5dc..82acbaf 100644
41020--- a/drivers/gpu/drm/i810/i810_drv.h
41021+++ b/drivers/gpu/drm/i810/i810_drv.h
41022@@ -110,8 +110,8 @@ typedef struct drm_i810_private {
41023 int page_flipping;
41024
41025 wait_queue_head_t irq_queue;
41026- atomic_t irq_received;
41027- atomic_t irq_emitted;
41028+ atomic_unchecked_t irq_received;
41029+ atomic_unchecked_t irq_emitted;
41030
41031 int front_offset;
41032 } drm_i810_private_t;
41033diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
41034index 1a46787..7fb387c 100644
41035--- a/drivers/gpu/drm/i915/i915_dma.c
41036+++ b/drivers/gpu/drm/i915/i915_dma.c
41037@@ -149,6 +149,9 @@ static int i915_getparam(struct drm_device *dev, void *data,
41038 case I915_PARAM_MMAP_VERSION:
41039 value = 1;
41040 break;
41041+ case I915_PARAM_HAS_LEGACY_CONTEXT:
41042+ value = drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT);
41043+ break;
41044 default:
41045 DRM_DEBUG("Unknown parameter %d\n", param->param);
41046 return -EINVAL;
41047@@ -362,7 +365,7 @@ static bool i915_switcheroo_can_switch(struct pci_dev *pdev)
41048 * locking inversion with the driver load path. And the access here is
41049 * completely racy anyway. So don't bother with locking for now.
41050 */
41051- return dev->open_count == 0;
41052+ return local_read(&dev->open_count) == 0;
41053 }
41054
41055 static const struct vga_switcheroo_client_ops i915_switcheroo_ops = {
41056diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
41057index 38a7425..5322b16 100644
41058--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
41059+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
41060@@ -872,12 +872,12 @@ i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec)
41061 static int
41062 validate_exec_list(struct drm_device *dev,
41063 struct drm_i915_gem_exec_object2 *exec,
41064- int count)
41065+ unsigned int count)
41066 {
41067 unsigned relocs_total = 0;
41068 unsigned relocs_max = UINT_MAX / sizeof(struct drm_i915_gem_relocation_entry);
41069 unsigned invalid_flags;
41070- int i;
41071+ unsigned int i;
41072
41073 invalid_flags = __EXEC_OBJECT_UNKNOWN_FLAGS;
41074 if (USES_FULL_PPGTT(dev))
41075diff --git a/drivers/gpu/drm/i915/i915_ioc32.c b/drivers/gpu/drm/i915/i915_ioc32.c
41076index 176de63..b50b66a 100644
41077--- a/drivers/gpu/drm/i915/i915_ioc32.c
41078+++ b/drivers/gpu/drm/i915/i915_ioc32.c
41079@@ -62,7 +62,7 @@ static int compat_i915_batchbuffer(struct file *file, unsigned int cmd,
41080 || __put_user(batchbuffer32.DR4, &batchbuffer->DR4)
41081 || __put_user(batchbuffer32.num_cliprects,
41082 &batchbuffer->num_cliprects)
41083- || __put_user((int __user *)(unsigned long)batchbuffer32.cliprects,
41084+ || __put_user((struct drm_clip_rect __user *)(unsigned long)batchbuffer32.cliprects,
41085 &batchbuffer->cliprects))
41086 return -EFAULT;
41087
41088@@ -91,13 +91,13 @@ static int compat_i915_cmdbuffer(struct file *file, unsigned int cmd,
41089
41090 cmdbuffer = compat_alloc_user_space(sizeof(*cmdbuffer));
41091 if (!access_ok(VERIFY_WRITE, cmdbuffer, sizeof(*cmdbuffer))
41092- || __put_user((int __user *)(unsigned long)cmdbuffer32.buf,
41093+ || __put_user((char __user *)(unsigned long)cmdbuffer32.buf,
41094 &cmdbuffer->buf)
41095 || __put_user(cmdbuffer32.sz, &cmdbuffer->sz)
41096 || __put_user(cmdbuffer32.DR1, &cmdbuffer->DR1)
41097 || __put_user(cmdbuffer32.DR4, &cmdbuffer->DR4)
41098 || __put_user(cmdbuffer32.num_cliprects, &cmdbuffer->num_cliprects)
41099- || __put_user((int __user *)(unsigned long)cmdbuffer32.cliprects,
41100+ || __put_user((struct drm_clip_rect __user *)(unsigned long)cmdbuffer32.cliprects,
41101 &cmdbuffer->cliprects))
41102 return -EFAULT;
41103
41104@@ -181,7 +181,7 @@ static int compat_i915_alloc(struct file *file, unsigned int cmd,
41105 (unsigned long)request);
41106 }
41107
41108-static drm_ioctl_compat_t *i915_compat_ioctls[] = {
41109+static drm_ioctl_compat_t i915_compat_ioctls[] = {
41110 [DRM_I915_BATCHBUFFER] = compat_i915_batchbuffer,
41111 [DRM_I915_CMDBUFFER] = compat_i915_cmdbuffer,
41112 [DRM_I915_GETPARAM] = compat_i915_getparam,
41113@@ -201,17 +201,13 @@ static drm_ioctl_compat_t *i915_compat_ioctls[] = {
41114 long i915_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
41115 {
41116 unsigned int nr = DRM_IOCTL_NR(cmd);
41117- drm_ioctl_compat_t *fn = NULL;
41118 int ret;
41119
41120 if (nr < DRM_COMMAND_BASE)
41121 return drm_compat_ioctl(filp, cmd, arg);
41122
41123- if (nr < DRM_COMMAND_BASE + ARRAY_SIZE(i915_compat_ioctls))
41124- fn = i915_compat_ioctls[nr - DRM_COMMAND_BASE];
41125-
41126- if (fn != NULL)
41127- ret = (*fn) (filp, cmd, arg);
41128+ if (nr < DRM_COMMAND_BASE + ARRAY_SIZE(i915_compat_ioctls) && i915_compat_ioctls[nr - DRM_COMMAND_BASE])
41129+ ret = (*i915_compat_ioctls[nr - DRM_COMMAND_BASE])(filp, cmd, arg);
41130 else
41131 ret = drm_ioctl(filp, cmd, arg);
41132
41133diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
41134index f75173c..f283e45 100644
41135--- a/drivers/gpu/drm/i915/intel_display.c
41136+++ b/drivers/gpu/drm/i915/intel_display.c
41137@@ -13056,13 +13056,13 @@ struct intel_quirk {
41138 int subsystem_vendor;
41139 int subsystem_device;
41140 void (*hook)(struct drm_device *dev);
41141-};
41142+} __do_const;
41143
41144 /* For systems that don't have a meaningful PCI subdevice/subvendor ID */
41145 struct intel_dmi_quirk {
41146 void (*hook)(struct drm_device *dev);
41147 const struct dmi_system_id (*dmi_id_list)[];
41148-};
41149+} __do_const;
41150
41151 static int intel_dmi_reverse_brightness(const struct dmi_system_id *id)
41152 {
41153@@ -13070,18 +13070,20 @@ static int intel_dmi_reverse_brightness(const struct dmi_system_id *id)
41154 return 1;
41155 }
41156
41157-static const struct intel_dmi_quirk intel_dmi_quirks[] = {
41158+static const struct dmi_system_id intel_dmi_quirks_table[] = {
41159 {
41160- .dmi_id_list = &(const struct dmi_system_id[]) {
41161- {
41162- .callback = intel_dmi_reverse_brightness,
41163- .ident = "NCR Corporation",
41164- .matches = {DMI_MATCH(DMI_SYS_VENDOR, "NCR Corporation"),
41165- DMI_MATCH(DMI_PRODUCT_NAME, ""),
41166- },
41167- },
41168- { } /* terminating entry */
41169+ .callback = intel_dmi_reverse_brightness,
41170+ .ident = "NCR Corporation",
41171+ .matches = {DMI_MATCH(DMI_SYS_VENDOR, "NCR Corporation"),
41172+ DMI_MATCH(DMI_PRODUCT_NAME, ""),
41173 },
41174+ },
41175+ { } /* terminating entry */
41176+};
41177+
41178+static const struct intel_dmi_quirk intel_dmi_quirks[] = {
41179+ {
41180+ .dmi_id_list = &intel_dmi_quirks_table,
41181 .hook = quirk_invert_brightness,
41182 },
41183 };
41184diff --git a/drivers/gpu/drm/imx/imx-drm-core.c b/drivers/gpu/drm/imx/imx-drm-core.c
41185index a002f53..0d60514 100644
41186--- a/drivers/gpu/drm/imx/imx-drm-core.c
41187+++ b/drivers/gpu/drm/imx/imx-drm-core.c
41188@@ -355,7 +355,7 @@ int imx_drm_add_crtc(struct drm_device *drm, struct drm_crtc *crtc,
41189 if (imxdrm->pipes >= MAX_CRTC)
41190 return -EINVAL;
41191
41192- if (imxdrm->drm->open_count)
41193+ if (local_read(&imxdrm->drm->open_count))
41194 return -EBUSY;
41195
41196 imx_drm_crtc = kzalloc(sizeof(*imx_drm_crtc), GFP_KERNEL);
41197diff --git a/drivers/gpu/drm/mga/mga_drv.h b/drivers/gpu/drm/mga/mga_drv.h
41198index b4a20149..219ab78 100644
41199--- a/drivers/gpu/drm/mga/mga_drv.h
41200+++ b/drivers/gpu/drm/mga/mga_drv.h
41201@@ -122,9 +122,9 @@ typedef struct drm_mga_private {
41202 u32 clear_cmd;
41203 u32 maccess;
41204
41205- atomic_t vbl_received; /**< Number of vblanks received. */
41206+ atomic_unchecked_t vbl_received; /**< Number of vblanks received. */
41207 wait_queue_head_t fence_queue;
41208- atomic_t last_fence_retired;
41209+ atomic_unchecked_t last_fence_retired;
41210 u32 next_fence_to_post;
41211
41212 unsigned int fb_cpp;
41213diff --git a/drivers/gpu/drm/mga/mga_ioc32.c b/drivers/gpu/drm/mga/mga_ioc32.c
41214index 729bfd5..14bae78 100644
41215--- a/drivers/gpu/drm/mga/mga_ioc32.c
41216+++ b/drivers/gpu/drm/mga/mga_ioc32.c
41217@@ -190,7 +190,7 @@ static int compat_mga_dma_bootstrap(struct file *file, unsigned int cmd,
41218 return 0;
41219 }
41220
41221-drm_ioctl_compat_t *mga_compat_ioctls[] = {
41222+drm_ioctl_compat_t mga_compat_ioctls[] = {
41223 [DRM_MGA_INIT] = compat_mga_init,
41224 [DRM_MGA_GETPARAM] = compat_mga_getparam,
41225 [DRM_MGA_DMA_BOOTSTRAP] = compat_mga_dma_bootstrap,
41226@@ -208,17 +208,13 @@ drm_ioctl_compat_t *mga_compat_ioctls[] = {
41227 long mga_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
41228 {
41229 unsigned int nr = DRM_IOCTL_NR(cmd);
41230- drm_ioctl_compat_t *fn = NULL;
41231 int ret;
41232
41233 if (nr < DRM_COMMAND_BASE)
41234 return drm_compat_ioctl(filp, cmd, arg);
41235
41236- if (nr < DRM_COMMAND_BASE + ARRAY_SIZE(mga_compat_ioctls))
41237- fn = mga_compat_ioctls[nr - DRM_COMMAND_BASE];
41238-
41239- if (fn != NULL)
41240- ret = (*fn) (filp, cmd, arg);
41241+ if (nr < DRM_COMMAND_BASE + ARRAY_SIZE(mga_compat_ioctls) && mga_compat_ioctls[nr - DRM_COMMAND_BASE])
41242+ ret = (*mga_compat_ioctls[nr - DRM_COMMAND_BASE]) (filp, cmd, arg);
41243 else
41244 ret = drm_ioctl(filp, cmd, arg);
41245
41246diff --git a/drivers/gpu/drm/mga/mga_irq.c b/drivers/gpu/drm/mga/mga_irq.c
41247index 1b071b8..de8601a 100644
41248--- a/drivers/gpu/drm/mga/mga_irq.c
41249+++ b/drivers/gpu/drm/mga/mga_irq.c
41250@@ -43,7 +43,7 @@ u32 mga_get_vblank_counter(struct drm_device *dev, int crtc)
41251 if (crtc != 0)
41252 return 0;
41253
41254- return atomic_read(&dev_priv->vbl_received);
41255+ return atomic_read_unchecked(&dev_priv->vbl_received);
41256 }
41257
41258
41259@@ -59,7 +59,7 @@ irqreturn_t mga_driver_irq_handler(int irq, void *arg)
41260 /* VBLANK interrupt */
41261 if (status & MGA_VLINEPEN) {
41262 MGA_WRITE(MGA_ICLEAR, MGA_VLINEICLR);
41263- atomic_inc(&dev_priv->vbl_received);
41264+ atomic_inc_unchecked(&dev_priv->vbl_received);
41265 drm_handle_vblank(dev, 0);
41266 handled = 1;
41267 }
41268@@ -78,7 +78,7 @@ irqreturn_t mga_driver_irq_handler(int irq, void *arg)
41269 if ((prim_start & ~0x03) != (prim_end & ~0x03))
41270 MGA_WRITE(MGA_PRIMEND, prim_end);
41271
41272- atomic_inc(&dev_priv->last_fence_retired);
41273+ atomic_inc_unchecked(&dev_priv->last_fence_retired);
41274 wake_up(&dev_priv->fence_queue);
41275 handled = 1;
41276 }
41277@@ -129,7 +129,7 @@ int mga_driver_fence_wait(struct drm_device *dev, unsigned int *sequence)
41278 * using fences.
41279 */
41280 DRM_WAIT_ON(ret, dev_priv->fence_queue, 3 * HZ,
41281- (((cur_fence = atomic_read(&dev_priv->last_fence_retired))
41282+ (((cur_fence = atomic_read_unchecked(&dev_priv->last_fence_retired))
41283 - *sequence) <= (1 << 23)));
41284
41285 *sequence = cur_fence;
41286diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c
41287index 0190b69..60c3eaf 100644
41288--- a/drivers/gpu/drm/nouveau/nouveau_bios.c
41289+++ b/drivers/gpu/drm/nouveau/nouveau_bios.c
41290@@ -963,7 +963,7 @@ static int parse_bit_tmds_tbl_entry(struct drm_device *dev, struct nvbios *bios,
41291 struct bit_table {
41292 const char id;
41293 int (* const parse_fn)(struct drm_device *, struct nvbios *, struct bit_entry *);
41294-};
41295+} __no_const;
41296
41297 #define BIT_TABLE(id, funcid) ((struct bit_table){ id, parse_bit_##funcid##_tbl_entry })
41298
41299diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c
41300index 8763deb..936b423 100644
41301--- a/drivers/gpu/drm/nouveau/nouveau_drm.c
41302+++ b/drivers/gpu/drm/nouveau/nouveau_drm.c
41303@@ -940,7 +940,8 @@ static struct drm_driver
41304 driver_stub = {
41305 .driver_features =
41306 DRIVER_USE_AGP |
41307- DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME | DRIVER_RENDER,
41308+ DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME | DRIVER_RENDER |
41309+ DRIVER_KMS_LEGACY_CONTEXT,
41310
41311 .load = nouveau_drm_load,
41312 .unload = nouveau_drm_unload,
41313diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.h b/drivers/gpu/drm/nouveau/nouveau_drm.h
41314index fc68f09..0511d71 100644
41315--- a/drivers/gpu/drm/nouveau/nouveau_drm.h
41316+++ b/drivers/gpu/drm/nouveau/nouveau_drm.h
41317@@ -121,7 +121,6 @@ struct nouveau_drm {
41318 struct drm_global_reference mem_global_ref;
41319 struct ttm_bo_global_ref bo_global_ref;
41320 struct ttm_bo_device bdev;
41321- atomic_t validate_sequence;
41322 int (*move)(struct nouveau_channel *,
41323 struct ttm_buffer_object *,
41324 struct ttm_mem_reg *, struct ttm_mem_reg *);
41325diff --git a/drivers/gpu/drm/nouveau/nouveau_ioc32.c b/drivers/gpu/drm/nouveau/nouveau_ioc32.c
41326index 462679a..88e32a7 100644
41327--- a/drivers/gpu/drm/nouveau/nouveau_ioc32.c
41328+++ b/drivers/gpu/drm/nouveau/nouveau_ioc32.c
41329@@ -50,7 +50,7 @@ long nouveau_compat_ioctl(struct file *filp, unsigned int cmd,
41330 unsigned long arg)
41331 {
41332 unsigned int nr = DRM_IOCTL_NR(cmd);
41333- drm_ioctl_compat_t *fn = NULL;
41334+ drm_ioctl_compat_t fn = NULL;
41335 int ret;
41336
41337 if (nr < DRM_COMMAND_BASE)
41338diff --git a/drivers/gpu/drm/nouveau/nouveau_ttm.c b/drivers/gpu/drm/nouveau/nouveau_ttm.c
41339index 273e501..3b6c0a2 100644
41340--- a/drivers/gpu/drm/nouveau/nouveau_ttm.c
41341+++ b/drivers/gpu/drm/nouveau/nouveau_ttm.c
41342@@ -127,11 +127,11 @@ nouveau_vram_manager_debug(struct ttm_mem_type_manager *man, const char *prefix)
41343 }
41344
41345 const struct ttm_mem_type_manager_func nouveau_vram_manager = {
41346- nouveau_vram_manager_init,
41347- nouveau_vram_manager_fini,
41348- nouveau_vram_manager_new,
41349- nouveau_vram_manager_del,
41350- nouveau_vram_manager_debug
41351+ .init = nouveau_vram_manager_init,
41352+ .takedown = nouveau_vram_manager_fini,
41353+ .get_node = nouveau_vram_manager_new,
41354+ .put_node = nouveau_vram_manager_del,
41355+ .debug = nouveau_vram_manager_debug
41356 };
41357
41358 static int
41359@@ -195,11 +195,11 @@ nouveau_gart_manager_debug(struct ttm_mem_type_manager *man, const char *prefix)
41360 }
41361
41362 const struct ttm_mem_type_manager_func nouveau_gart_manager = {
41363- nouveau_gart_manager_init,
41364- nouveau_gart_manager_fini,
41365- nouveau_gart_manager_new,
41366- nouveau_gart_manager_del,
41367- nouveau_gart_manager_debug
41368+ .init = nouveau_gart_manager_init,
41369+ .takedown = nouveau_gart_manager_fini,
41370+ .get_node = nouveau_gart_manager_new,
41371+ .put_node = nouveau_gart_manager_del,
41372+ .debug = nouveau_gart_manager_debug
41373 };
41374
41375 /*XXX*/
41376@@ -268,11 +268,11 @@ nv04_gart_manager_debug(struct ttm_mem_type_manager *man, const char *prefix)
41377 }
41378
41379 const struct ttm_mem_type_manager_func nv04_gart_manager = {
41380- nv04_gart_manager_init,
41381- nv04_gart_manager_fini,
41382- nv04_gart_manager_new,
41383- nv04_gart_manager_del,
41384- nv04_gart_manager_debug
41385+ .init = nv04_gart_manager_init,
41386+ .takedown = nv04_gart_manager_fini,
41387+ .get_node = nv04_gart_manager_new,
41388+ .put_node = nv04_gart_manager_del,
41389+ .debug = nv04_gart_manager_debug
41390 };
41391
41392 int
41393diff --git a/drivers/gpu/drm/nouveau/nouveau_vga.c b/drivers/gpu/drm/nouveau/nouveau_vga.c
41394index c7592ec..dd45ebc 100644
41395--- a/drivers/gpu/drm/nouveau/nouveau_vga.c
41396+++ b/drivers/gpu/drm/nouveau/nouveau_vga.c
41397@@ -72,7 +72,7 @@ nouveau_switcheroo_can_switch(struct pci_dev *pdev)
41398 * locking inversion with the driver load path. And the access here is
41399 * completely racy anyway. So don't bother with locking for now.
41400 */
41401- return dev->open_count == 0;
41402+ return local_read(&dev->open_count) == 0;
41403 }
41404
41405 static const struct vga_switcheroo_client_ops
41406diff --git a/drivers/gpu/drm/qxl/qxl_cmd.c b/drivers/gpu/drm/qxl/qxl_cmd.c
41407index 9782364..89bd954 100644
41408--- a/drivers/gpu/drm/qxl/qxl_cmd.c
41409+++ b/drivers/gpu/drm/qxl/qxl_cmd.c
41410@@ -285,27 +285,27 @@ static int wait_for_io_cmd_user(struct qxl_device *qdev, uint8_t val, long port,
41411 int ret;
41412
41413 mutex_lock(&qdev->async_io_mutex);
41414- irq_num = atomic_read(&qdev->irq_received_io_cmd);
41415+ irq_num = atomic_read_unchecked(&qdev->irq_received_io_cmd);
41416 if (qdev->last_sent_io_cmd > irq_num) {
41417 if (intr)
41418 ret = wait_event_interruptible_timeout(qdev->io_cmd_event,
41419- atomic_read(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
41420+ atomic_read_unchecked(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
41421 else
41422 ret = wait_event_timeout(qdev->io_cmd_event,
41423- atomic_read(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
41424+ atomic_read_unchecked(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
41425 /* 0 is timeout, just bail the "hw" has gone away */
41426 if (ret <= 0)
41427 goto out;
41428- irq_num = atomic_read(&qdev->irq_received_io_cmd);
41429+ irq_num = atomic_read_unchecked(&qdev->irq_received_io_cmd);
41430 }
41431 outb(val, addr);
41432 qdev->last_sent_io_cmd = irq_num + 1;
41433 if (intr)
41434 ret = wait_event_interruptible_timeout(qdev->io_cmd_event,
41435- atomic_read(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
41436+ atomic_read_unchecked(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
41437 else
41438 ret = wait_event_timeout(qdev->io_cmd_event,
41439- atomic_read(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
41440+ atomic_read_unchecked(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
41441 out:
41442 if (ret > 0)
41443 ret = 0;
41444diff --git a/drivers/gpu/drm/qxl/qxl_debugfs.c b/drivers/gpu/drm/qxl/qxl_debugfs.c
41445index 6911b8c..89d6867 100644
41446--- a/drivers/gpu/drm/qxl/qxl_debugfs.c
41447+++ b/drivers/gpu/drm/qxl/qxl_debugfs.c
41448@@ -42,10 +42,10 @@ qxl_debugfs_irq_received(struct seq_file *m, void *data)
41449 struct drm_info_node *node = (struct drm_info_node *) m->private;
41450 struct qxl_device *qdev = node->minor->dev->dev_private;
41451
41452- seq_printf(m, "%d\n", atomic_read(&qdev->irq_received));
41453- seq_printf(m, "%d\n", atomic_read(&qdev->irq_received_display));
41454- seq_printf(m, "%d\n", atomic_read(&qdev->irq_received_cursor));
41455- seq_printf(m, "%d\n", atomic_read(&qdev->irq_received_io_cmd));
41456+ seq_printf(m, "%d\n", atomic_read_unchecked(&qdev->irq_received));
41457+ seq_printf(m, "%d\n", atomic_read_unchecked(&qdev->irq_received_display));
41458+ seq_printf(m, "%d\n", atomic_read_unchecked(&qdev->irq_received_cursor));
41459+ seq_printf(m, "%d\n", atomic_read_unchecked(&qdev->irq_received_io_cmd));
41460 seq_printf(m, "%d\n", qdev->irq_received_error);
41461 return 0;
41462 }
41463diff --git a/drivers/gpu/drm/qxl/qxl_drv.h b/drivers/gpu/drm/qxl/qxl_drv.h
41464index 7c6cafe..460f542 100644
41465--- a/drivers/gpu/drm/qxl/qxl_drv.h
41466+++ b/drivers/gpu/drm/qxl/qxl_drv.h
41467@@ -290,10 +290,10 @@ struct qxl_device {
41468 unsigned int last_sent_io_cmd;
41469
41470 /* interrupt handling */
41471- atomic_t irq_received;
41472- atomic_t irq_received_display;
41473- atomic_t irq_received_cursor;
41474- atomic_t irq_received_io_cmd;
41475+ atomic_unchecked_t irq_received;
41476+ atomic_unchecked_t irq_received_display;
41477+ atomic_unchecked_t irq_received_cursor;
41478+ atomic_unchecked_t irq_received_io_cmd;
41479 unsigned irq_received_error;
41480 wait_queue_head_t display_event;
41481 wait_queue_head_t cursor_event;
41482diff --git a/drivers/gpu/drm/qxl/qxl_ioctl.c b/drivers/gpu/drm/qxl/qxl_ioctl.c
41483index b110883..dd06418 100644
41484--- a/drivers/gpu/drm/qxl/qxl_ioctl.c
41485+++ b/drivers/gpu/drm/qxl/qxl_ioctl.c
41486@@ -181,7 +181,7 @@ static int qxl_process_single_command(struct qxl_device *qdev,
41487
41488 /* TODO copy slow path code from i915 */
41489 fb_cmd = qxl_bo_kmap_atomic_page(qdev, cmd_bo, (release->release_offset & PAGE_SIZE));
41490- unwritten = __copy_from_user_inatomic_nocache(fb_cmd + sizeof(union qxl_release_info) + (release->release_offset & ~PAGE_SIZE), (void *)(unsigned long)cmd->command, cmd->command_size);
41491+ unwritten = __copy_from_user_inatomic_nocache(fb_cmd + sizeof(union qxl_release_info) + (release->release_offset & ~PAGE_SIZE), (void __force_user *)(unsigned long)cmd->command, cmd->command_size);
41492
41493 {
41494 struct qxl_drawable *draw = fb_cmd;
41495@@ -201,7 +201,7 @@ static int qxl_process_single_command(struct qxl_device *qdev,
41496 struct drm_qxl_reloc reloc;
41497
41498 if (copy_from_user(&reloc,
41499- &((struct drm_qxl_reloc *)(uintptr_t)cmd->relocs)[i],
41500+ &((struct drm_qxl_reloc __force_user *)(uintptr_t)cmd->relocs)[i],
41501 sizeof(reloc))) {
41502 ret = -EFAULT;
41503 goto out_free_bos;
41504@@ -294,10 +294,10 @@ static int qxl_execbuffer_ioctl(struct drm_device *dev, void *data,
41505
41506 for (cmd_num = 0; cmd_num < execbuffer->commands_num; ++cmd_num) {
41507
41508- struct drm_qxl_command *commands =
41509- (struct drm_qxl_command *)(uintptr_t)execbuffer->commands;
41510+ struct drm_qxl_command __user *commands =
41511+ (struct drm_qxl_command __user *)(uintptr_t)execbuffer->commands;
41512
41513- if (copy_from_user(&user_cmd, &commands[cmd_num],
41514+ if (copy_from_user(&user_cmd, (struct drm_qxl_command __force_user *)&commands[cmd_num],
41515 sizeof(user_cmd)))
41516 return -EFAULT;
41517
41518diff --git a/drivers/gpu/drm/qxl/qxl_irq.c b/drivers/gpu/drm/qxl/qxl_irq.c
41519index 0bf1e20..42a7310 100644
41520--- a/drivers/gpu/drm/qxl/qxl_irq.c
41521+++ b/drivers/gpu/drm/qxl/qxl_irq.c
41522@@ -36,19 +36,19 @@ irqreturn_t qxl_irq_handler(int irq, void *arg)
41523 if (!pending)
41524 return IRQ_NONE;
41525
41526- atomic_inc(&qdev->irq_received);
41527+ atomic_inc_unchecked(&qdev->irq_received);
41528
41529 if (pending & QXL_INTERRUPT_DISPLAY) {
41530- atomic_inc(&qdev->irq_received_display);
41531+ atomic_inc_unchecked(&qdev->irq_received_display);
41532 wake_up_all(&qdev->display_event);
41533 qxl_queue_garbage_collect(qdev, false);
41534 }
41535 if (pending & QXL_INTERRUPT_CURSOR) {
41536- atomic_inc(&qdev->irq_received_cursor);
41537+ atomic_inc_unchecked(&qdev->irq_received_cursor);
41538 wake_up_all(&qdev->cursor_event);
41539 }
41540 if (pending & QXL_INTERRUPT_IO_CMD) {
41541- atomic_inc(&qdev->irq_received_io_cmd);
41542+ atomic_inc_unchecked(&qdev->irq_received_io_cmd);
41543 wake_up_all(&qdev->io_cmd_event);
41544 }
41545 if (pending & QXL_INTERRUPT_ERROR) {
41546@@ -85,10 +85,10 @@ int qxl_irq_init(struct qxl_device *qdev)
41547 init_waitqueue_head(&qdev->io_cmd_event);
41548 INIT_WORK(&qdev->client_monitors_config_work,
41549 qxl_client_monitors_config_work_func);
41550- atomic_set(&qdev->irq_received, 0);
41551- atomic_set(&qdev->irq_received_display, 0);
41552- atomic_set(&qdev->irq_received_cursor, 0);
41553- atomic_set(&qdev->irq_received_io_cmd, 0);
41554+ atomic_set_unchecked(&qdev->irq_received, 0);
41555+ atomic_set_unchecked(&qdev->irq_received_display, 0);
41556+ atomic_set_unchecked(&qdev->irq_received_cursor, 0);
41557+ atomic_set_unchecked(&qdev->irq_received_io_cmd, 0);
41558 qdev->irq_received_error = 0;
41559 ret = drm_irq_install(qdev->ddev, qdev->ddev->pdev->irq);
41560 qdev->ram_header->int_mask = QXL_INTERRUPT_MASK;
41561diff --git a/drivers/gpu/drm/qxl/qxl_ttm.c b/drivers/gpu/drm/qxl/qxl_ttm.c
41562index 0cbc4c9..0e46686 100644
41563--- a/drivers/gpu/drm/qxl/qxl_ttm.c
41564+++ b/drivers/gpu/drm/qxl/qxl_ttm.c
41565@@ -103,7 +103,7 @@ static void qxl_ttm_global_fini(struct qxl_device *qdev)
41566 }
41567 }
41568
41569-static struct vm_operations_struct qxl_ttm_vm_ops;
41570+static vm_operations_struct_no_const qxl_ttm_vm_ops __read_only;
41571 static const struct vm_operations_struct *ttm_vm_ops;
41572
41573 static int qxl_ttm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
41574@@ -145,8 +145,10 @@ int qxl_mmap(struct file *filp, struct vm_area_struct *vma)
41575 return r;
41576 if (unlikely(ttm_vm_ops == NULL)) {
41577 ttm_vm_ops = vma->vm_ops;
41578+ pax_open_kernel();
41579 qxl_ttm_vm_ops = *ttm_vm_ops;
41580 qxl_ttm_vm_ops.fault = &qxl_ttm_fault;
41581+ pax_close_kernel();
41582 }
41583 vma->vm_ops = &qxl_ttm_vm_ops;
41584 return 0;
41585@@ -464,25 +466,23 @@ static int qxl_mm_dump_table(struct seq_file *m, void *data)
41586 static int qxl_ttm_debugfs_init(struct qxl_device *qdev)
41587 {
41588 #if defined(CONFIG_DEBUG_FS)
41589- static struct drm_info_list qxl_mem_types_list[QXL_DEBUGFS_MEM_TYPES];
41590- static char qxl_mem_types_names[QXL_DEBUGFS_MEM_TYPES][32];
41591- unsigned i;
41592+ static struct drm_info_list qxl_mem_types_list[QXL_DEBUGFS_MEM_TYPES] = {
41593+ {
41594+ .name = "qxl_mem_mm",
41595+ .show = &qxl_mm_dump_table,
41596+ },
41597+ {
41598+ .name = "qxl_surf_mm",
41599+ .show = &qxl_mm_dump_table,
41600+ }
41601+ };
41602
41603- for (i = 0; i < QXL_DEBUGFS_MEM_TYPES; i++) {
41604- if (i == 0)
41605- sprintf(qxl_mem_types_names[i], "qxl_mem_mm");
41606- else
41607- sprintf(qxl_mem_types_names[i], "qxl_surf_mm");
41608- qxl_mem_types_list[i].name = qxl_mem_types_names[i];
41609- qxl_mem_types_list[i].show = &qxl_mm_dump_table;
41610- qxl_mem_types_list[i].driver_features = 0;
41611- if (i == 0)
41612- qxl_mem_types_list[i].data = qdev->mman.bdev.man[TTM_PL_VRAM].priv;
41613- else
41614- qxl_mem_types_list[i].data = qdev->mman.bdev.man[TTM_PL_PRIV0].priv;
41615+ pax_open_kernel();
41616+ *(void **)&qxl_mem_types_list[0].data = qdev->mman.bdev.man[TTM_PL_VRAM].priv;
41617+ *(void **)&qxl_mem_types_list[1].data = qdev->mman.bdev.man[TTM_PL_PRIV0].priv;
41618+ pax_close_kernel();
41619
41620- }
41621- return qxl_debugfs_add_files(qdev, qxl_mem_types_list, i);
41622+ return qxl_debugfs_add_files(qdev, qxl_mem_types_list, QXL_DEBUGFS_MEM_TYPES);
41623 #else
41624 return 0;
41625 #endif
41626diff --git a/drivers/gpu/drm/r128/r128_cce.c b/drivers/gpu/drm/r128/r128_cce.c
41627index 2c45ac9..5d740f8 100644
41628--- a/drivers/gpu/drm/r128/r128_cce.c
41629+++ b/drivers/gpu/drm/r128/r128_cce.c
41630@@ -377,7 +377,7 @@ static int r128_do_init_cce(struct drm_device *dev, drm_r128_init_t *init)
41631
41632 /* GH: Simple idle check.
41633 */
41634- atomic_set(&dev_priv->idle_count, 0);
41635+ atomic_set_unchecked(&dev_priv->idle_count, 0);
41636
41637 /* We don't support anything other than bus-mastering ring mode,
41638 * but the ring can be in either AGP or PCI space for the ring
41639diff --git a/drivers/gpu/drm/r128/r128_drv.h b/drivers/gpu/drm/r128/r128_drv.h
41640index 723e5d6..102dbaf 100644
41641--- a/drivers/gpu/drm/r128/r128_drv.h
41642+++ b/drivers/gpu/drm/r128/r128_drv.h
41643@@ -93,14 +93,14 @@ typedef struct drm_r128_private {
41644 int is_pci;
41645 unsigned long cce_buffers_offset;
41646
41647- atomic_t idle_count;
41648+ atomic_unchecked_t idle_count;
41649
41650 int page_flipping;
41651 int current_page;
41652 u32 crtc_offset;
41653 u32 crtc_offset_cntl;
41654
41655- atomic_t vbl_received;
41656+ atomic_unchecked_t vbl_received;
41657
41658 u32 color_fmt;
41659 unsigned int front_offset;
41660diff --git a/drivers/gpu/drm/r128/r128_ioc32.c b/drivers/gpu/drm/r128/r128_ioc32.c
41661index 663f38c..ec159a1 100644
41662--- a/drivers/gpu/drm/r128/r128_ioc32.c
41663+++ b/drivers/gpu/drm/r128/r128_ioc32.c
41664@@ -178,7 +178,7 @@ static int compat_r128_getparam(struct file *file, unsigned int cmd,
41665 return drm_ioctl(file, DRM_IOCTL_R128_GETPARAM, (unsigned long)getparam);
41666 }
41667
41668-drm_ioctl_compat_t *r128_compat_ioctls[] = {
41669+drm_ioctl_compat_t r128_compat_ioctls[] = {
41670 [DRM_R128_INIT] = compat_r128_init,
41671 [DRM_R128_DEPTH] = compat_r128_depth,
41672 [DRM_R128_STIPPLE] = compat_r128_stipple,
41673@@ -197,17 +197,13 @@ drm_ioctl_compat_t *r128_compat_ioctls[] = {
41674 long r128_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
41675 {
41676 unsigned int nr = DRM_IOCTL_NR(cmd);
41677- drm_ioctl_compat_t *fn = NULL;
41678 int ret;
41679
41680 if (nr < DRM_COMMAND_BASE)
41681 return drm_compat_ioctl(filp, cmd, arg);
41682
41683- if (nr < DRM_COMMAND_BASE + ARRAY_SIZE(r128_compat_ioctls))
41684- fn = r128_compat_ioctls[nr - DRM_COMMAND_BASE];
41685-
41686- if (fn != NULL)
41687- ret = (*fn) (filp, cmd, arg);
41688+ if (nr < DRM_COMMAND_BASE + ARRAY_SIZE(r128_compat_ioctls) && r128_compat_ioctls[nr - DRM_COMMAND_BASE])
41689+ ret = (*r128_compat_ioctls[nr - DRM_COMMAND_BASE]) (filp, cmd, arg);
41690 else
41691 ret = drm_ioctl(filp, cmd, arg);
41692
41693diff --git a/drivers/gpu/drm/r128/r128_irq.c b/drivers/gpu/drm/r128/r128_irq.c
41694index c2ae496..30b5993 100644
41695--- a/drivers/gpu/drm/r128/r128_irq.c
41696+++ b/drivers/gpu/drm/r128/r128_irq.c
41697@@ -41,7 +41,7 @@ u32 r128_get_vblank_counter(struct drm_device *dev, int crtc)
41698 if (crtc != 0)
41699 return 0;
41700
41701- return atomic_read(&dev_priv->vbl_received);
41702+ return atomic_read_unchecked(&dev_priv->vbl_received);
41703 }
41704
41705 irqreturn_t r128_driver_irq_handler(int irq, void *arg)
41706@@ -55,7 +55,7 @@ irqreturn_t r128_driver_irq_handler(int irq, void *arg)
41707 /* VBLANK interrupt */
41708 if (status & R128_CRTC_VBLANK_INT) {
41709 R128_WRITE(R128_GEN_INT_STATUS, R128_CRTC_VBLANK_INT_AK);
41710- atomic_inc(&dev_priv->vbl_received);
41711+ atomic_inc_unchecked(&dev_priv->vbl_received);
41712 drm_handle_vblank(dev, 0);
41713 return IRQ_HANDLED;
41714 }
41715diff --git a/drivers/gpu/drm/r128/r128_state.c b/drivers/gpu/drm/r128/r128_state.c
41716index 8fd2d9f..18c9660 100644
41717--- a/drivers/gpu/drm/r128/r128_state.c
41718+++ b/drivers/gpu/drm/r128/r128_state.c
41719@@ -320,10 +320,10 @@ static void r128_clear_box(drm_r128_private_t *dev_priv,
41720
41721 static void r128_cce_performance_boxes(drm_r128_private_t *dev_priv)
41722 {
41723- if (atomic_read(&dev_priv->idle_count) == 0)
41724+ if (atomic_read_unchecked(&dev_priv->idle_count) == 0)
41725 r128_clear_box(dev_priv, 64, 4, 8, 8, 0, 255, 0);
41726 else
41727- atomic_set(&dev_priv->idle_count, 0);
41728+ atomic_set_unchecked(&dev_priv->idle_count, 0);
41729 }
41730
41731 #endif
41732diff --git a/drivers/gpu/drm/radeon/mkregtable.c b/drivers/gpu/drm/radeon/mkregtable.c
41733index b928c17..e5d9400 100644
41734--- a/drivers/gpu/drm/radeon/mkregtable.c
41735+++ b/drivers/gpu/drm/radeon/mkregtable.c
41736@@ -624,14 +624,14 @@ static int parser_auth(struct table *t, const char *filename)
41737 regex_t mask_rex;
41738 regmatch_t match[4];
41739 char buf[1024];
41740- size_t end;
41741+ long end;
41742 int len;
41743 int done = 0;
41744 int r;
41745 unsigned o;
41746 struct offset *offset;
41747 char last_reg_s[10];
41748- int last_reg;
41749+ unsigned long last_reg;
41750
41751 if (regcomp
41752 (&mask_rex, "(0x[0-9a-fA-F]*) *([_a-zA-Z0-9]*)", REG_EXTENDED)) {
41753diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
41754index bd7519f..e1c2cd95 100644
41755--- a/drivers/gpu/drm/radeon/radeon_device.c
41756+++ b/drivers/gpu/drm/radeon/radeon_device.c
41757@@ -1247,7 +1247,7 @@ static bool radeon_switcheroo_can_switch(struct pci_dev *pdev)
41758 * locking inversion with the driver load path. And the access here is
41759 * completely racy anyway. So don't bother with locking for now.
41760 */
41761- return dev->open_count == 0;
41762+ return local_read(&dev->open_count) == 0;
41763 }
41764
41765 static const struct vga_switcheroo_client_ops radeon_switcheroo_ops = {
41766diff --git a/drivers/gpu/drm/radeon/radeon_drv.h b/drivers/gpu/drm/radeon/radeon_drv.h
41767index 46bd393..6ae4719 100644
41768--- a/drivers/gpu/drm/radeon/radeon_drv.h
41769+++ b/drivers/gpu/drm/radeon/radeon_drv.h
41770@@ -264,7 +264,7 @@ typedef struct drm_radeon_private {
41771
41772 /* SW interrupt */
41773 wait_queue_head_t swi_queue;
41774- atomic_t swi_emitted;
41775+ atomic_unchecked_t swi_emitted;
41776 int vblank_crtc;
41777 uint32_t irq_enable_reg;
41778 uint32_t r500_disp_irq_reg;
41779diff --git a/drivers/gpu/drm/radeon/radeon_ioc32.c b/drivers/gpu/drm/radeon/radeon_ioc32.c
41780index 0b98ea1..a3c770f 100644
41781--- a/drivers/gpu/drm/radeon/radeon_ioc32.c
41782+++ b/drivers/gpu/drm/radeon/radeon_ioc32.c
41783@@ -358,7 +358,7 @@ static int compat_radeon_cp_setparam(struct file *file, unsigned int cmd,
41784 request = compat_alloc_user_space(sizeof(*request));
41785 if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
41786 || __put_user(req32.param, &request->param)
41787- || __put_user((void __user *)(unsigned long)req32.value,
41788+ || __put_user((unsigned long)req32.value,
41789 &request->value))
41790 return -EFAULT;
41791
41792@@ -368,7 +368,7 @@ static int compat_radeon_cp_setparam(struct file *file, unsigned int cmd,
41793 #define compat_radeon_cp_setparam NULL
41794 #endif /* X86_64 || IA64 */
41795
41796-static drm_ioctl_compat_t *radeon_compat_ioctls[] = {
41797+static drm_ioctl_compat_t radeon_compat_ioctls[] = {
41798 [DRM_RADEON_CP_INIT] = compat_radeon_cp_init,
41799 [DRM_RADEON_CLEAR] = compat_radeon_cp_clear,
41800 [DRM_RADEON_STIPPLE] = compat_radeon_cp_stipple,
41801@@ -393,17 +393,13 @@ static drm_ioctl_compat_t *radeon_compat_ioctls[] = {
41802 long radeon_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
41803 {
41804 unsigned int nr = DRM_IOCTL_NR(cmd);
41805- drm_ioctl_compat_t *fn = NULL;
41806 int ret;
41807
41808 if (nr < DRM_COMMAND_BASE)
41809 return drm_compat_ioctl(filp, cmd, arg);
41810
41811- if (nr < DRM_COMMAND_BASE + ARRAY_SIZE(radeon_compat_ioctls))
41812- fn = radeon_compat_ioctls[nr - DRM_COMMAND_BASE];
41813-
41814- if (fn != NULL)
41815- ret = (*fn) (filp, cmd, arg);
41816+ if (nr < DRM_COMMAND_BASE + ARRAY_SIZE(radeon_compat_ioctls) && radeon_compat_ioctls[nr - DRM_COMMAND_BASE])
41817+ ret = (*radeon_compat_ioctls[nr - DRM_COMMAND_BASE]) (filp, cmd, arg);
41818 else
41819 ret = drm_ioctl(filp, cmd, arg);
41820
41821diff --git a/drivers/gpu/drm/radeon/radeon_irq.c b/drivers/gpu/drm/radeon/radeon_irq.c
41822index 244b19b..c19226d 100644
41823--- a/drivers/gpu/drm/radeon/radeon_irq.c
41824+++ b/drivers/gpu/drm/radeon/radeon_irq.c
41825@@ -226,8 +226,8 @@ static int radeon_emit_irq(struct drm_device * dev)
41826 unsigned int ret;
41827 RING_LOCALS;
41828
41829- atomic_inc(&dev_priv->swi_emitted);
41830- ret = atomic_read(&dev_priv->swi_emitted);
41831+ atomic_inc_unchecked(&dev_priv->swi_emitted);
41832+ ret = atomic_read_unchecked(&dev_priv->swi_emitted);
41833
41834 BEGIN_RING(4);
41835 OUT_RING_REG(RADEON_LAST_SWI_REG, ret);
41836@@ -353,7 +353,7 @@ int radeon_driver_irq_postinstall(struct drm_device *dev)
41837 drm_radeon_private_t *dev_priv =
41838 (drm_radeon_private_t *) dev->dev_private;
41839
41840- atomic_set(&dev_priv->swi_emitted, 0);
41841+ atomic_set_unchecked(&dev_priv->swi_emitted, 0);
41842 init_waitqueue_head(&dev_priv->swi_queue);
41843
41844 dev->max_vblank_count = 0x001fffff;
41845diff --git a/drivers/gpu/drm/radeon/radeon_state.c b/drivers/gpu/drm/radeon/radeon_state.c
41846index 15aee72..cda326e 100644
41847--- a/drivers/gpu/drm/radeon/radeon_state.c
41848+++ b/drivers/gpu/drm/radeon/radeon_state.c
41849@@ -2168,7 +2168,7 @@ static int radeon_cp_clear(struct drm_device *dev, void *data, struct drm_file *
41850 if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS)
41851 sarea_priv->nbox = RADEON_NR_SAREA_CLIPRECTS;
41852
41853- if (copy_from_user(&depth_boxes, clear->depth_boxes,
41854+ if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS || copy_from_user(&depth_boxes, clear->depth_boxes,
41855 sarea_priv->nbox * sizeof(depth_boxes[0])))
41856 return -EFAULT;
41857
41858@@ -3031,7 +3031,7 @@ static int radeon_cp_getparam(struct drm_device *dev, void *data, struct drm_fil
41859 {
41860 drm_radeon_private_t *dev_priv = dev->dev_private;
41861 drm_radeon_getparam_t *param = data;
41862- int value;
41863+ int value = 0;
41864
41865 DRM_DEBUG("pid=%d\n", DRM_CURRENTPID);
41866
41867diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
41868index edafd3c..3af7c9c 100644
41869--- a/drivers/gpu/drm/radeon/radeon_ttm.c
41870+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
41871@@ -961,7 +961,7 @@ void radeon_ttm_set_active_vram_size(struct radeon_device *rdev, u64 size)
41872 man->size = size >> PAGE_SHIFT;
41873 }
41874
41875-static struct vm_operations_struct radeon_ttm_vm_ops;
41876+static vm_operations_struct_no_const radeon_ttm_vm_ops __read_only;
41877 static const struct vm_operations_struct *ttm_vm_ops = NULL;
41878
41879 static int radeon_ttm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
41880@@ -1002,8 +1002,10 @@ int radeon_mmap(struct file *filp, struct vm_area_struct *vma)
41881 }
41882 if (unlikely(ttm_vm_ops == NULL)) {
41883 ttm_vm_ops = vma->vm_ops;
41884+ pax_open_kernel();
41885 radeon_ttm_vm_ops = *ttm_vm_ops;
41886 radeon_ttm_vm_ops.fault = &radeon_ttm_fault;
41887+ pax_close_kernel();
41888 }
41889 vma->vm_ops = &radeon_ttm_vm_ops;
41890 return 0;
41891diff --git a/drivers/gpu/drm/tegra/dc.c b/drivers/gpu/drm/tegra/dc.c
41892index 1a52522..8e78043 100644
41893--- a/drivers/gpu/drm/tegra/dc.c
41894+++ b/drivers/gpu/drm/tegra/dc.c
41895@@ -1585,7 +1585,7 @@ static int tegra_dc_debugfs_init(struct tegra_dc *dc, struct drm_minor *minor)
41896 }
41897
41898 for (i = 0; i < ARRAY_SIZE(debugfs_files); i++)
41899- dc->debugfs_files[i].data = dc;
41900+ *(void **)&dc->debugfs_files[i].data = dc;
41901
41902 err = drm_debugfs_create_files(dc->debugfs_files,
41903 ARRAY_SIZE(debugfs_files),
41904diff --git a/drivers/gpu/drm/tegra/dsi.c b/drivers/gpu/drm/tegra/dsi.c
41905index ed970f6..4eeea42 100644
41906--- a/drivers/gpu/drm/tegra/dsi.c
41907+++ b/drivers/gpu/drm/tegra/dsi.c
41908@@ -62,7 +62,7 @@ struct tegra_dsi {
41909 struct clk *clk_lp;
41910 struct clk *clk;
41911
41912- struct drm_info_list *debugfs_files;
41913+ drm_info_list_no_const *debugfs_files;
41914 struct drm_minor *minor;
41915 struct dentry *debugfs;
41916
41917diff --git a/drivers/gpu/drm/tegra/hdmi.c b/drivers/gpu/drm/tegra/hdmi.c
41918index 7eaaee74..cc2bc04 100644
41919--- a/drivers/gpu/drm/tegra/hdmi.c
41920+++ b/drivers/gpu/drm/tegra/hdmi.c
41921@@ -64,7 +64,7 @@ struct tegra_hdmi {
41922 bool stereo;
41923 bool dvi;
41924
41925- struct drm_info_list *debugfs_files;
41926+ drm_info_list_no_const *debugfs_files;
41927 struct drm_minor *minor;
41928 struct dentry *debugfs;
41929 };
41930diff --git a/drivers/gpu/drm/ttm/ttm_bo_manager.c b/drivers/gpu/drm/ttm/ttm_bo_manager.c
41931index aa0bd054..aea6a01 100644
41932--- a/drivers/gpu/drm/ttm/ttm_bo_manager.c
41933+++ b/drivers/gpu/drm/ttm/ttm_bo_manager.c
41934@@ -148,10 +148,10 @@ static void ttm_bo_man_debug(struct ttm_mem_type_manager *man,
41935 }
41936
41937 const struct ttm_mem_type_manager_func ttm_bo_manager_func = {
41938- ttm_bo_man_init,
41939- ttm_bo_man_takedown,
41940- ttm_bo_man_get_node,
41941- ttm_bo_man_put_node,
41942- ttm_bo_man_debug
41943+ .init = ttm_bo_man_init,
41944+ .takedown = ttm_bo_man_takedown,
41945+ .get_node = ttm_bo_man_get_node,
41946+ .put_node = ttm_bo_man_put_node,
41947+ .debug = ttm_bo_man_debug
41948 };
41949 EXPORT_SYMBOL(ttm_bo_manager_func);
41950diff --git a/drivers/gpu/drm/ttm/ttm_memory.c b/drivers/gpu/drm/ttm/ttm_memory.c
41951index a1803fb..c53f6b0 100644
41952--- a/drivers/gpu/drm/ttm/ttm_memory.c
41953+++ b/drivers/gpu/drm/ttm/ttm_memory.c
41954@@ -264,7 +264,7 @@ static int ttm_mem_init_kernel_zone(struct ttm_mem_global *glob,
41955 zone->glob = glob;
41956 glob->zone_kernel = zone;
41957 ret = kobject_init_and_add(
41958- &zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, zone->name);
41959+ &zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, "%s", zone->name);
41960 if (unlikely(ret != 0)) {
41961 kobject_put(&zone->kobj);
41962 return ret;
41963@@ -348,7 +348,7 @@ static int ttm_mem_init_dma32_zone(struct ttm_mem_global *glob,
41964 zone->glob = glob;
41965 glob->zone_dma32 = zone;
41966 ret = kobject_init_and_add(
41967- &zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, zone->name);
41968+ &zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, "%s", zone->name);
41969 if (unlikely(ret != 0)) {
41970 kobject_put(&zone->kobj);
41971 return ret;
41972diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c
41973index 025c429..314062f 100644
41974--- a/drivers/gpu/drm/ttm/ttm_page_alloc.c
41975+++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c
41976@@ -54,7 +54,7 @@
41977
41978 #define NUM_PAGES_TO_ALLOC (PAGE_SIZE/sizeof(struct page *))
41979 #define SMALL_ALLOCATION 16
41980-#define FREE_ALL_PAGES (~0U)
41981+#define FREE_ALL_PAGES (~0UL)
41982 /* times are in msecs */
41983 #define PAGE_FREE_INTERVAL 1000
41984
41985@@ -299,15 +299,14 @@ static void ttm_pool_update_free_locked(struct ttm_page_pool *pool,
41986 * @free_all: If set to true will free all pages in pool
41987 * @use_static: Safe to use static buffer
41988 **/
41989-static int ttm_page_pool_free(struct ttm_page_pool *pool, unsigned nr_free,
41990+static unsigned long ttm_page_pool_free(struct ttm_page_pool *pool, unsigned long nr_free,
41991 bool use_static)
41992 {
41993 static struct page *static_buf[NUM_PAGES_TO_ALLOC];
41994 unsigned long irq_flags;
41995 struct page *p;
41996 struct page **pages_to_free;
41997- unsigned freed_pages = 0,
41998- npages_to_free = nr_free;
41999+ unsigned long freed_pages = 0, npages_to_free = nr_free;
42000
42001 if (NUM_PAGES_TO_ALLOC < nr_free)
42002 npages_to_free = NUM_PAGES_TO_ALLOC;
42003@@ -371,7 +370,8 @@ restart:
42004 __list_del(&p->lru, &pool->list);
42005
42006 ttm_pool_update_free_locked(pool, freed_pages);
42007- nr_free -= freed_pages;
42008+ if (likely(nr_free != FREE_ALL_PAGES))
42009+ nr_free -= freed_pages;
42010 }
42011
42012 spin_unlock_irqrestore(&pool->lock, irq_flags);
42013@@ -399,7 +399,7 @@ ttm_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
42014 unsigned i;
42015 unsigned pool_offset;
42016 struct ttm_page_pool *pool;
42017- int shrink_pages = sc->nr_to_scan;
42018+ unsigned long shrink_pages = sc->nr_to_scan;
42019 unsigned long freed = 0;
42020
42021 if (!mutex_trylock(&lock))
42022@@ -407,7 +407,7 @@ ttm_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
42023 pool_offset = ++start_pool % NUM_POOLS;
42024 /* select start pool in round robin fashion */
42025 for (i = 0; i < NUM_POOLS; ++i) {
42026- unsigned nr_free = shrink_pages;
42027+ unsigned long nr_free = shrink_pages;
42028 if (shrink_pages == 0)
42029 break;
42030 pool = &_manager->pools[(i + pool_offset)%NUM_POOLS];
42031@@ -673,7 +673,7 @@ out:
42032 }
42033
42034 /* Put all pages in pages list to correct pool to wait for reuse */
42035-static void ttm_put_pages(struct page **pages, unsigned npages, int flags,
42036+static void ttm_put_pages(struct page **pages, unsigned long npages, int flags,
42037 enum ttm_caching_state cstate)
42038 {
42039 unsigned long irq_flags;
42040@@ -728,7 +728,7 @@ static int ttm_get_pages(struct page **pages, unsigned npages, int flags,
42041 struct list_head plist;
42042 struct page *p = NULL;
42043 gfp_t gfp_flags = GFP_USER;
42044- unsigned count;
42045+ unsigned long count;
42046 int r;
42047
42048 /* set zero flag for page allocation if required */
42049diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
42050index 01e1d27..aaa018a 100644
42051--- a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
42052+++ b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
42053@@ -56,7 +56,7 @@
42054
42055 #define NUM_PAGES_TO_ALLOC (PAGE_SIZE/sizeof(struct page *))
42056 #define SMALL_ALLOCATION 4
42057-#define FREE_ALL_PAGES (~0U)
42058+#define FREE_ALL_PAGES (~0UL)
42059 /* times are in msecs */
42060 #define IS_UNDEFINED (0)
42061 #define IS_WC (1<<1)
42062@@ -413,7 +413,7 @@ static void ttm_dma_page_put(struct dma_pool *pool, struct dma_page *d_page)
42063 * @nr_free: If set to true will free all pages in pool
42064 * @use_static: Safe to use static buffer
42065 **/
42066-static unsigned ttm_dma_page_pool_free(struct dma_pool *pool, unsigned nr_free,
42067+static unsigned long ttm_dma_page_pool_free(struct dma_pool *pool, unsigned long nr_free,
42068 bool use_static)
42069 {
42070 static struct page *static_buf[NUM_PAGES_TO_ALLOC];
42071@@ -421,8 +421,7 @@ static unsigned ttm_dma_page_pool_free(struct dma_pool *pool, unsigned nr_free,
42072 struct dma_page *dma_p, *tmp;
42073 struct page **pages_to_free;
42074 struct list_head d_pages;
42075- unsigned freed_pages = 0,
42076- npages_to_free = nr_free;
42077+ unsigned long freed_pages = 0, npages_to_free = nr_free;
42078
42079 if (NUM_PAGES_TO_ALLOC < nr_free)
42080 npages_to_free = NUM_PAGES_TO_ALLOC;
42081@@ -499,7 +498,8 @@ restart:
42082 /* remove range of pages from the pool */
42083 if (freed_pages) {
42084 ttm_pool_update_free_locked(pool, freed_pages);
42085- nr_free -= freed_pages;
42086+ if (likely(nr_free != FREE_ALL_PAGES))
42087+ nr_free -= freed_pages;
42088 }
42089
42090 spin_unlock_irqrestore(&pool->lock, irq_flags);
42091@@ -936,7 +936,7 @@ void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma, struct device *dev)
42092 struct dma_page *d_page, *next;
42093 enum pool_type type;
42094 bool is_cached = false;
42095- unsigned count = 0, i, npages = 0;
42096+ unsigned long count = 0, i, npages = 0;
42097 unsigned long irq_flags;
42098
42099 type = ttm_to_type(ttm->page_flags, ttm->caching_state);
42100@@ -1012,7 +1012,7 @@ ttm_dma_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
42101 static unsigned start_pool;
42102 unsigned idx = 0;
42103 unsigned pool_offset;
42104- unsigned shrink_pages = sc->nr_to_scan;
42105+ unsigned long shrink_pages = sc->nr_to_scan;
42106 struct device_pools *p;
42107 unsigned long freed = 0;
42108
42109@@ -1025,7 +1025,7 @@ ttm_dma_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
42110 goto out;
42111 pool_offset = ++start_pool % _manager->npools;
42112 list_for_each_entry(p, &_manager->pools, pools) {
42113- unsigned nr_free;
42114+ unsigned long nr_free;
42115
42116 if (!p->dev)
42117 continue;
42118@@ -1039,7 +1039,7 @@ ttm_dma_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
42119 shrink_pages = ttm_dma_page_pool_free(p->pool, nr_free, true);
42120 freed += nr_free - shrink_pages;
42121
42122- pr_debug("%s: (%s:%d) Asked to shrink %d, have %d more to go\n",
42123+ pr_debug("%s: (%s:%d) Asked to shrink %lu, have %lu more to go\n",
42124 p->pool->dev_name, p->pool->name, current->pid,
42125 nr_free, shrink_pages);
42126 }
42127diff --git a/drivers/gpu/drm/udl/udl_fb.c b/drivers/gpu/drm/udl/udl_fb.c
42128index 5fc16ce..1bd84ec 100644
42129--- a/drivers/gpu/drm/udl/udl_fb.c
42130+++ b/drivers/gpu/drm/udl/udl_fb.c
42131@@ -367,7 +367,6 @@ static int udl_fb_release(struct fb_info *info, int user)
42132 fb_deferred_io_cleanup(info);
42133 kfree(info->fbdefio);
42134 info->fbdefio = NULL;
42135- info->fbops->fb_mmap = udl_fb_mmap;
42136 }
42137
42138 pr_warn("released /dev/fb%d user=%d count=%d\n",
42139diff --git a/drivers/gpu/drm/via/via_drv.h b/drivers/gpu/drm/via/via_drv.h
42140index ef8c500..01030c8 100644
42141--- a/drivers/gpu/drm/via/via_drv.h
42142+++ b/drivers/gpu/drm/via/via_drv.h
42143@@ -53,7 +53,7 @@ typedef struct drm_via_ring_buffer {
42144 typedef uint32_t maskarray_t[5];
42145
42146 typedef struct drm_via_irq {
42147- atomic_t irq_received;
42148+ atomic_unchecked_t irq_received;
42149 uint32_t pending_mask;
42150 uint32_t enable_mask;
42151 wait_queue_head_t irq_queue;
42152@@ -77,7 +77,7 @@ typedef struct drm_via_private {
42153 struct timeval last_vblank;
42154 int last_vblank_valid;
42155 unsigned usec_per_vblank;
42156- atomic_t vbl_received;
42157+ atomic_unchecked_t vbl_received;
42158 drm_via_state_t hc_state;
42159 char pci_buf[VIA_PCI_BUF_SIZE];
42160 const uint32_t *fire_offsets[VIA_FIRE_BUF_SIZE];
42161diff --git a/drivers/gpu/drm/via/via_irq.c b/drivers/gpu/drm/via/via_irq.c
42162index 1319433..a993b0c 100644
42163--- a/drivers/gpu/drm/via/via_irq.c
42164+++ b/drivers/gpu/drm/via/via_irq.c
42165@@ -101,7 +101,7 @@ u32 via_get_vblank_counter(struct drm_device *dev, int crtc)
42166 if (crtc != 0)
42167 return 0;
42168
42169- return atomic_read(&dev_priv->vbl_received);
42170+ return atomic_read_unchecked(&dev_priv->vbl_received);
42171 }
42172
42173 irqreturn_t via_driver_irq_handler(int irq, void *arg)
42174@@ -116,8 +116,8 @@ irqreturn_t via_driver_irq_handler(int irq, void *arg)
42175
42176 status = VIA_READ(VIA_REG_INTERRUPT);
42177 if (status & VIA_IRQ_VBLANK_PENDING) {
42178- atomic_inc(&dev_priv->vbl_received);
42179- if (!(atomic_read(&dev_priv->vbl_received) & 0x0F)) {
42180+ atomic_inc_unchecked(&dev_priv->vbl_received);
42181+ if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0x0F)) {
42182 do_gettimeofday(&cur_vblank);
42183 if (dev_priv->last_vblank_valid) {
42184 dev_priv->usec_per_vblank =
42185@@ -127,7 +127,7 @@ irqreturn_t via_driver_irq_handler(int irq, void *arg)
42186 dev_priv->last_vblank = cur_vblank;
42187 dev_priv->last_vblank_valid = 1;
42188 }
42189- if (!(atomic_read(&dev_priv->vbl_received) & 0xFF)) {
42190+ if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0xFF)) {
42191 DRM_DEBUG("US per vblank is: %u\n",
42192 dev_priv->usec_per_vblank);
42193 }
42194@@ -137,7 +137,7 @@ irqreturn_t via_driver_irq_handler(int irq, void *arg)
42195
42196 for (i = 0; i < dev_priv->num_irqs; ++i) {
42197 if (status & cur_irq->pending_mask) {
42198- atomic_inc(&cur_irq->irq_received);
42199+ atomic_inc_unchecked(&cur_irq->irq_received);
42200 wake_up(&cur_irq->irq_queue);
42201 handled = 1;
42202 if (dev_priv->irq_map[drm_via_irq_dma0_td] == i)
42203@@ -242,11 +242,11 @@ via_driver_irq_wait(struct drm_device *dev, unsigned int irq, int force_sequence
42204 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * HZ,
42205 ((VIA_READ(masks[irq][2]) & masks[irq][3]) ==
42206 masks[irq][4]));
42207- cur_irq_sequence = atomic_read(&cur_irq->irq_received);
42208+ cur_irq_sequence = atomic_read_unchecked(&cur_irq->irq_received);
42209 } else {
42210 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * HZ,
42211 (((cur_irq_sequence =
42212- atomic_read(&cur_irq->irq_received)) -
42213+ atomic_read_unchecked(&cur_irq->irq_received)) -
42214 *sequence) <= (1 << 23)));
42215 }
42216 *sequence = cur_irq_sequence;
42217@@ -284,7 +284,7 @@ void via_driver_irq_preinstall(struct drm_device *dev)
42218 }
42219
42220 for (i = 0; i < dev_priv->num_irqs; ++i) {
42221- atomic_set(&cur_irq->irq_received, 0);
42222+ atomic_set_unchecked(&cur_irq->irq_received, 0);
42223 cur_irq->enable_mask = dev_priv->irq_masks[i][0];
42224 cur_irq->pending_mask = dev_priv->irq_masks[i][1];
42225 init_waitqueue_head(&cur_irq->irq_queue);
42226@@ -366,7 +366,7 @@ int via_wait_irq(struct drm_device *dev, void *data, struct drm_file *file_priv)
42227 switch (irqwait->request.type & ~VIA_IRQ_FLAGS_MASK) {
42228 case VIA_IRQ_RELATIVE:
42229 irqwait->request.sequence +=
42230- atomic_read(&cur_irq->irq_received);
42231+ atomic_read_unchecked(&cur_irq->irq_received);
42232 irqwait->request.type &= ~_DRM_VBLANK_RELATIVE;
42233 case VIA_IRQ_ABSOLUTE:
42234 break;
42235diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
42236index d26a6da..5fa41ed 100644
42237--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
42238+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
42239@@ -447,7 +447,7 @@ struct vmw_private {
42240 * Fencing and IRQs.
42241 */
42242
42243- atomic_t marker_seq;
42244+ atomic_unchecked_t marker_seq;
42245 wait_queue_head_t fence_queue;
42246 wait_queue_head_t fifo_queue;
42247 spinlock_t waiter_lock;
42248diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
42249index 39f2b03..d1b0a64 100644
42250--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
42251+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
42252@@ -152,7 +152,7 @@ int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
42253 (unsigned int) min,
42254 (unsigned int) fifo->capabilities);
42255
42256- atomic_set(&dev_priv->marker_seq, dev_priv->last_read_seqno);
42257+ atomic_set_unchecked(&dev_priv->marker_seq, dev_priv->last_read_seqno);
42258 iowrite32(dev_priv->last_read_seqno, fifo_mem + SVGA_FIFO_FENCE);
42259 vmw_marker_queue_init(&fifo->marker_queue);
42260 return vmw_fifo_send_fence(dev_priv, &dummy);
42261@@ -372,7 +372,7 @@ void *vmw_fifo_reserve(struct vmw_private *dev_priv, uint32_t bytes)
42262 if (reserveable)
42263 iowrite32(bytes, fifo_mem +
42264 SVGA_FIFO_RESERVED);
42265- return fifo_mem + (next_cmd >> 2);
42266+ return (__le32 __force_kernel *)fifo_mem + (next_cmd >> 2);
42267 } else {
42268 need_bounce = true;
42269 }
42270@@ -492,7 +492,7 @@ int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *seqno)
42271
42272 fm = vmw_fifo_reserve(dev_priv, bytes);
42273 if (unlikely(fm == NULL)) {
42274- *seqno = atomic_read(&dev_priv->marker_seq);
42275+ *seqno = atomic_read_unchecked(&dev_priv->marker_seq);
42276 ret = -ENOMEM;
42277 (void)vmw_fallback_wait(dev_priv, false, true, *seqno,
42278 false, 3*HZ);
42279@@ -500,7 +500,7 @@ int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *seqno)
42280 }
42281
42282 do {
42283- *seqno = atomic_add_return(1, &dev_priv->marker_seq);
42284+ *seqno = atomic_add_return_unchecked(1, &dev_priv->marker_seq);
42285 } while (*seqno == 0);
42286
42287 if (!(fifo_state->capabilities & SVGA_FIFO_CAP_FENCE)) {
42288diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c b/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
42289index 170b61b..fec7348 100644
42290--- a/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
42291+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
42292@@ -164,9 +164,9 @@ static void vmw_gmrid_man_debug(struct ttm_mem_type_manager *man,
42293 }
42294
42295 const struct ttm_mem_type_manager_func vmw_gmrid_manager_func = {
42296- vmw_gmrid_man_init,
42297- vmw_gmrid_man_takedown,
42298- vmw_gmrid_man_get_node,
42299- vmw_gmrid_man_put_node,
42300- vmw_gmrid_man_debug
42301+ .init = vmw_gmrid_man_init,
42302+ .takedown = vmw_gmrid_man_takedown,
42303+ .get_node = vmw_gmrid_man_get_node,
42304+ .put_node = vmw_gmrid_man_put_node,
42305+ .debug = vmw_gmrid_man_debug
42306 };
42307diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
42308index 69c8ce2..cacb0ab 100644
42309--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
42310+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
42311@@ -235,7 +235,7 @@ int vmw_present_ioctl(struct drm_device *dev, void *data,
42312 int ret;
42313
42314 num_clips = arg->num_clips;
42315- clips_ptr = (struct drm_vmw_rect *)(unsigned long)arg->clips_ptr;
42316+ clips_ptr = (struct drm_vmw_rect __user *)(unsigned long)arg->clips_ptr;
42317
42318 if (unlikely(num_clips == 0))
42319 return 0;
42320@@ -318,7 +318,7 @@ int vmw_present_readback_ioctl(struct drm_device *dev, void *data,
42321 int ret;
42322
42323 num_clips = arg->num_clips;
42324- clips_ptr = (struct drm_vmw_rect *)(unsigned long)arg->clips_ptr;
42325+ clips_ptr = (struct drm_vmw_rect __user *)(unsigned long)arg->clips_ptr;
42326
42327 if (unlikely(num_clips == 0))
42328 return 0;
42329diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
42330index 9fe9827..0aa2fc0 100644
42331--- a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
42332+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
42333@@ -102,7 +102,7 @@ bool vmw_seqno_passed(struct vmw_private *dev_priv,
42334 * emitted. Then the fence is stale and signaled.
42335 */
42336
42337- ret = ((atomic_read(&dev_priv->marker_seq) - seqno)
42338+ ret = ((atomic_read_unchecked(&dev_priv->marker_seq) - seqno)
42339 > VMW_FENCE_WRAP);
42340
42341 return ret;
42342@@ -133,7 +133,7 @@ int vmw_fallback_wait(struct vmw_private *dev_priv,
42343
42344 if (fifo_idle)
42345 down_read(&fifo_state->rwsem);
42346- signal_seq = atomic_read(&dev_priv->marker_seq);
42347+ signal_seq = atomic_read_unchecked(&dev_priv->marker_seq);
42348 ret = 0;
42349
42350 for (;;) {
42351diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c b/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c
42352index efd1ffd..0ae13ca 100644
42353--- a/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c
42354+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c
42355@@ -135,7 +135,7 @@ int vmw_wait_lag(struct vmw_private *dev_priv,
42356 while (!vmw_lag_lt(queue, us)) {
42357 spin_lock(&queue->lock);
42358 if (list_empty(&queue->head))
42359- seqno = atomic_read(&dev_priv->marker_seq);
42360+ seqno = atomic_read_unchecked(&dev_priv->marker_seq);
42361 else {
42362 marker = list_first_entry(&queue->head,
42363 struct vmw_marker, head);
42364diff --git a/drivers/gpu/vga/vga_switcheroo.c b/drivers/gpu/vga/vga_switcheroo.c
42365index 37ac7b5..d52a5c9 100644
42366--- a/drivers/gpu/vga/vga_switcheroo.c
42367+++ b/drivers/gpu/vga/vga_switcheroo.c
42368@@ -644,7 +644,7 @@ static int vga_switcheroo_runtime_resume(struct device *dev)
42369
42370 /* this version is for the case where the power switch is separate
42371 to the device being powered down. */
42372-int vga_switcheroo_init_domain_pm_ops(struct device *dev, struct dev_pm_domain *domain)
42373+int vga_switcheroo_init_domain_pm_ops(struct device *dev, dev_pm_domain_no_const *domain)
42374 {
42375 /* copy over all the bus versions */
42376 if (dev->bus && dev->bus->pm) {
42377@@ -695,7 +695,7 @@ static int vga_switcheroo_runtime_resume_hdmi_audio(struct device *dev)
42378 return ret;
42379 }
42380
42381-int vga_switcheroo_init_domain_pm_optimus_hdmi_audio(struct device *dev, struct dev_pm_domain *domain)
42382+int vga_switcheroo_init_domain_pm_optimus_hdmi_audio(struct device *dev, dev_pm_domain_no_const *domain)
42383 {
42384 /* copy over all the bus versions */
42385 if (dev->bus && dev->bus->pm) {
42386diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
42387index 56ce8c2..32ce524 100644
42388--- a/drivers/hid/hid-core.c
42389+++ b/drivers/hid/hid-core.c
42390@@ -2531,7 +2531,7 @@ EXPORT_SYMBOL_GPL(hid_ignore);
42391
42392 int hid_add_device(struct hid_device *hdev)
42393 {
42394- static atomic_t id = ATOMIC_INIT(0);
42395+ static atomic_unchecked_t id = ATOMIC_INIT(0);
42396 int ret;
42397
42398 if (WARN_ON(hdev->status & HID_STAT_ADDED))
42399@@ -2574,7 +2574,7 @@ int hid_add_device(struct hid_device *hdev)
42400 /* XXX hack, any other cleaner solution after the driver core
42401 * is converted to allow more than 20 bytes as the device name? */
42402 dev_set_name(&hdev->dev, "%04X:%04X:%04X.%04X", hdev->bus,
42403- hdev->vendor, hdev->product, atomic_inc_return(&id));
42404+ hdev->vendor, hdev->product, atomic_inc_return_unchecked(&id));
42405
42406 hid_debug_register(hdev, dev_name(&hdev->dev));
42407 ret = device_add(&hdev->dev);
42408diff --git a/drivers/hid/hid-wiimote-debug.c b/drivers/hid/hid-wiimote-debug.c
42409index c13fb5b..55a3802 100644
42410--- a/drivers/hid/hid-wiimote-debug.c
42411+++ b/drivers/hid/hid-wiimote-debug.c
42412@@ -66,7 +66,7 @@ static ssize_t wiidebug_eeprom_read(struct file *f, char __user *u, size_t s,
42413 else if (size == 0)
42414 return -EIO;
42415
42416- if (copy_to_user(u, buf, size))
42417+ if (size > sizeof(buf) || copy_to_user(u, buf, size))
42418 return -EFAULT;
42419
42420 *off += size;
42421diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c
42422index 00bc30e..d8e5097 100644
42423--- a/drivers/hv/channel.c
42424+++ b/drivers/hv/channel.c
42425@@ -370,7 +370,7 @@ int vmbus_establish_gpadl(struct vmbus_channel *channel, void *kbuffer,
42426 int ret = 0;
42427
42428 next_gpadl_handle =
42429- (atomic_inc_return(&vmbus_connection.next_gpadl_handle) - 1);
42430+ (atomic_inc_return_unchecked(&vmbus_connection.next_gpadl_handle) - 1);
42431
42432 ret = create_gpadl_header(kbuffer, size, &msginfo, &msgcount);
42433 if (ret)
42434diff --git a/drivers/hv/hv.c b/drivers/hv/hv.c
42435index 50e51a5..b0bfd78 100644
42436--- a/drivers/hv/hv.c
42437+++ b/drivers/hv/hv.c
42438@@ -118,7 +118,7 @@ static u64 do_hypercall(u64 control, void *input, void *output)
42439 u64 output_address = (output) ? virt_to_phys(output) : 0;
42440 u32 output_address_hi = output_address >> 32;
42441 u32 output_address_lo = output_address & 0xFFFFFFFF;
42442- void *hypercall_page = hv_context.hypercall_page;
42443+ void *hypercall_page = ktva_ktla(hv_context.hypercall_page);
42444
42445 __asm__ __volatile__ ("call *%8" : "=d"(hv_status_hi),
42446 "=a"(hv_status_lo) : "d" (control_hi),
42447@@ -164,7 +164,7 @@ int hv_init(void)
42448 /* See if the hypercall page is already set */
42449 rdmsrl(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64);
42450
42451- virtaddr = __vmalloc(PAGE_SIZE, GFP_KERNEL, PAGE_KERNEL_EXEC);
42452+ virtaddr = __vmalloc(PAGE_SIZE, GFP_KERNEL, PAGE_KERNEL_RX);
42453
42454 if (!virtaddr)
42455 goto cleanup;
42456diff --git a/drivers/hv/hv_balloon.c b/drivers/hv/hv_balloon.c
42457index ff16938..e60879c 100644
42458--- a/drivers/hv/hv_balloon.c
42459+++ b/drivers/hv/hv_balloon.c
42460@@ -470,7 +470,7 @@ MODULE_PARM_DESC(hot_add, "If set attempt memory hot_add");
42461
42462 module_param(pressure_report_delay, uint, (S_IRUGO | S_IWUSR));
42463 MODULE_PARM_DESC(pressure_report_delay, "Delay in secs in reporting pressure");
42464-static atomic_t trans_id = ATOMIC_INIT(0);
42465+static atomic_unchecked_t trans_id = ATOMIC_INIT(0);
42466
42467 static int dm_ring_size = (5 * PAGE_SIZE);
42468
42469@@ -947,7 +947,7 @@ static void hot_add_req(struct work_struct *dummy)
42470 pr_info("Memory hot add failed\n");
42471
42472 dm->state = DM_INITIALIZED;
42473- resp.hdr.trans_id = atomic_inc_return(&trans_id);
42474+ resp.hdr.trans_id = atomic_inc_return_unchecked(&trans_id);
42475 vmbus_sendpacket(dm->dev->channel, &resp,
42476 sizeof(struct dm_hot_add_response),
42477 (unsigned long)NULL,
42478@@ -1028,7 +1028,7 @@ static void post_status(struct hv_dynmem_device *dm)
42479 memset(&status, 0, sizeof(struct dm_status));
42480 status.hdr.type = DM_STATUS_REPORT;
42481 status.hdr.size = sizeof(struct dm_status);
42482- status.hdr.trans_id = atomic_inc_return(&trans_id);
42483+ status.hdr.trans_id = atomic_inc_return_unchecked(&trans_id);
42484
42485 /*
42486 * The host expects the guest to report free memory.
42487@@ -1048,7 +1048,7 @@ static void post_status(struct hv_dynmem_device *dm)
42488 * send the status. This can happen if we were interrupted
42489 * after we picked our transaction ID.
42490 */
42491- if (status.hdr.trans_id != atomic_read(&trans_id))
42492+ if (status.hdr.trans_id != atomic_read_unchecked(&trans_id))
42493 return;
42494
42495 /*
42496@@ -1188,7 +1188,7 @@ static void balloon_up(struct work_struct *dummy)
42497 */
42498
42499 do {
42500- bl_resp->hdr.trans_id = atomic_inc_return(&trans_id);
42501+ bl_resp->hdr.trans_id = atomic_inc_return_unchecked(&trans_id);
42502 ret = vmbus_sendpacket(dm_device.dev->channel,
42503 bl_resp,
42504 bl_resp->hdr.size,
42505@@ -1234,7 +1234,7 @@ static void balloon_down(struct hv_dynmem_device *dm,
42506
42507 memset(&resp, 0, sizeof(struct dm_unballoon_response));
42508 resp.hdr.type = DM_UNBALLOON_RESPONSE;
42509- resp.hdr.trans_id = atomic_inc_return(&trans_id);
42510+ resp.hdr.trans_id = atomic_inc_return_unchecked(&trans_id);
42511 resp.hdr.size = sizeof(struct dm_unballoon_response);
42512
42513 vmbus_sendpacket(dm_device.dev->channel, &resp,
42514@@ -1295,7 +1295,7 @@ static void version_resp(struct hv_dynmem_device *dm,
42515 memset(&version_req, 0, sizeof(struct dm_version_request));
42516 version_req.hdr.type = DM_VERSION_REQUEST;
42517 version_req.hdr.size = sizeof(struct dm_version_request);
42518- version_req.hdr.trans_id = atomic_inc_return(&trans_id);
42519+ version_req.hdr.trans_id = atomic_inc_return_unchecked(&trans_id);
42520 version_req.version.version = DYNMEM_PROTOCOL_VERSION_WIN7;
42521 version_req.is_last_attempt = 1;
42522
42523@@ -1468,7 +1468,7 @@ static int balloon_probe(struct hv_device *dev,
42524 memset(&version_req, 0, sizeof(struct dm_version_request));
42525 version_req.hdr.type = DM_VERSION_REQUEST;
42526 version_req.hdr.size = sizeof(struct dm_version_request);
42527- version_req.hdr.trans_id = atomic_inc_return(&trans_id);
42528+ version_req.hdr.trans_id = atomic_inc_return_unchecked(&trans_id);
42529 version_req.version.version = DYNMEM_PROTOCOL_VERSION_WIN8;
42530 version_req.is_last_attempt = 0;
42531
42532@@ -1499,7 +1499,7 @@ static int balloon_probe(struct hv_device *dev,
42533 memset(&cap_msg, 0, sizeof(struct dm_capabilities));
42534 cap_msg.hdr.type = DM_CAPABILITIES_REPORT;
42535 cap_msg.hdr.size = sizeof(struct dm_capabilities);
42536- cap_msg.hdr.trans_id = atomic_inc_return(&trans_id);
42537+ cap_msg.hdr.trans_id = atomic_inc_return_unchecked(&trans_id);
42538
42539 cap_msg.caps.cap_bits.balloon = 1;
42540 cap_msg.caps.cap_bits.hot_add = 1;
42541diff --git a/drivers/hv/hyperv_vmbus.h b/drivers/hv/hyperv_vmbus.h
42542index 44b1c94..6dccc2c 100644
42543--- a/drivers/hv/hyperv_vmbus.h
42544+++ b/drivers/hv/hyperv_vmbus.h
42545@@ -632,7 +632,7 @@ enum vmbus_connect_state {
42546 struct vmbus_connection {
42547 enum vmbus_connect_state conn_state;
42548
42549- atomic_t next_gpadl_handle;
42550+ atomic_unchecked_t next_gpadl_handle;
42551
42552 /*
42553 * Represents channel interrupts. Each bit position represents a
42554diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
42555index f518b8d7..4bc0b64 100644
42556--- a/drivers/hv/vmbus_drv.c
42557+++ b/drivers/hv/vmbus_drv.c
42558@@ -840,10 +840,10 @@ int vmbus_device_register(struct hv_device *child_device_obj)
42559 {
42560 int ret = 0;
42561
42562- static atomic_t device_num = ATOMIC_INIT(0);
42563+ static atomic_unchecked_t device_num = ATOMIC_INIT(0);
42564
42565 dev_set_name(&child_device_obj->device, "vmbus_0_%d",
42566- atomic_inc_return(&device_num));
42567+ atomic_inc_return_unchecked(&device_num));
42568
42569 child_device_obj->device.bus = &hv_bus;
42570 child_device_obj->device.parent = &hv_acpi_dev->dev;
42571diff --git a/drivers/hwmon/acpi_power_meter.c b/drivers/hwmon/acpi_power_meter.c
42572index 579bdf9..0dac21d5 100644
42573--- a/drivers/hwmon/acpi_power_meter.c
42574+++ b/drivers/hwmon/acpi_power_meter.c
42575@@ -116,7 +116,7 @@ struct sensor_template {
42576 struct device_attribute *devattr,
42577 const char *buf, size_t count);
42578 int index;
42579-};
42580+} __do_const;
42581
42582 /* Averaging interval */
42583 static int update_avg_interval(struct acpi_power_meter_resource *resource)
42584@@ -631,7 +631,7 @@ static int register_attrs(struct acpi_power_meter_resource *resource,
42585 struct sensor_template *attrs)
42586 {
42587 struct device *dev = &resource->acpi_dev->dev;
42588- struct sensor_device_attribute *sensors =
42589+ sensor_device_attribute_no_const *sensors =
42590 &resource->sensors[resource->num_sensors];
42591 int res = 0;
42592
42593@@ -973,7 +973,7 @@ static int __init enable_cap_knobs(const struct dmi_system_id *d)
42594 return 0;
42595 }
42596
42597-static struct dmi_system_id __initdata pm_dmi_table[] = {
42598+static const struct dmi_system_id __initconst pm_dmi_table[] = {
42599 {
42600 enable_cap_knobs, "IBM Active Energy Manager",
42601 {
42602diff --git a/drivers/hwmon/applesmc.c b/drivers/hwmon/applesmc.c
42603index 0af63da..05a183a 100644
42604--- a/drivers/hwmon/applesmc.c
42605+++ b/drivers/hwmon/applesmc.c
42606@@ -1105,7 +1105,7 @@ static int applesmc_create_nodes(struct applesmc_node_group *groups, int num)
42607 {
42608 struct applesmc_node_group *grp;
42609 struct applesmc_dev_attr *node;
42610- struct attribute *attr;
42611+ attribute_no_const *attr;
42612 int ret, i;
42613
42614 for (grp = groups; grp->format; grp++) {
42615diff --git a/drivers/hwmon/asus_atk0110.c b/drivers/hwmon/asus_atk0110.c
42616index cccef87..06ce8ec 100644
42617--- a/drivers/hwmon/asus_atk0110.c
42618+++ b/drivers/hwmon/asus_atk0110.c
42619@@ -147,10 +147,10 @@ MODULE_DEVICE_TABLE(acpi, atk_ids);
42620 struct atk_sensor_data {
42621 struct list_head list;
42622 struct atk_data *data;
42623- struct device_attribute label_attr;
42624- struct device_attribute input_attr;
42625- struct device_attribute limit1_attr;
42626- struct device_attribute limit2_attr;
42627+ device_attribute_no_const label_attr;
42628+ device_attribute_no_const input_attr;
42629+ device_attribute_no_const limit1_attr;
42630+ device_attribute_no_const limit2_attr;
42631 char label_attr_name[ATTR_NAME_SIZE];
42632 char input_attr_name[ATTR_NAME_SIZE];
42633 char limit1_attr_name[ATTR_NAME_SIZE];
42634@@ -270,7 +270,7 @@ static ssize_t atk_name_show(struct device *dev,
42635 static struct device_attribute atk_name_attr =
42636 __ATTR(name, 0444, atk_name_show, NULL);
42637
42638-static void atk_init_attribute(struct device_attribute *attr, char *name,
42639+static void atk_init_attribute(device_attribute_no_const *attr, char *name,
42640 sysfs_show_func show)
42641 {
42642 sysfs_attr_init(&attr->attr);
42643diff --git a/drivers/hwmon/coretemp.c b/drivers/hwmon/coretemp.c
42644index 5b7fec8..05c957a 100644
42645--- a/drivers/hwmon/coretemp.c
42646+++ b/drivers/hwmon/coretemp.c
42647@@ -783,7 +783,7 @@ static int coretemp_cpu_callback(struct notifier_block *nfb,
42648 return NOTIFY_OK;
42649 }
42650
42651-static struct notifier_block coretemp_cpu_notifier __refdata = {
42652+static struct notifier_block coretemp_cpu_notifier = {
42653 .notifier_call = coretemp_cpu_callback,
42654 };
42655
42656diff --git a/drivers/hwmon/ibmaem.c b/drivers/hwmon/ibmaem.c
42657index 7a8a6fb..015c1fd 100644
42658--- a/drivers/hwmon/ibmaem.c
42659+++ b/drivers/hwmon/ibmaem.c
42660@@ -924,7 +924,7 @@ static int aem_register_sensors(struct aem_data *data,
42661 struct aem_rw_sensor_template *rw)
42662 {
42663 struct device *dev = &data->pdev->dev;
42664- struct sensor_device_attribute *sensors = data->sensors;
42665+ sensor_device_attribute_no_const *sensors = data->sensors;
42666 int err;
42667
42668 /* Set up read-only sensors */
42669diff --git a/drivers/hwmon/iio_hwmon.c b/drivers/hwmon/iio_hwmon.c
42670index 17ae2eb..21b71dd 100644
42671--- a/drivers/hwmon/iio_hwmon.c
42672+++ b/drivers/hwmon/iio_hwmon.c
42673@@ -61,7 +61,7 @@ static int iio_hwmon_probe(struct platform_device *pdev)
42674 {
42675 struct device *dev = &pdev->dev;
42676 struct iio_hwmon_state *st;
42677- struct sensor_device_attribute *a;
42678+ sensor_device_attribute_no_const *a;
42679 int ret, i;
42680 int in_i = 1, temp_i = 1, curr_i = 1, humidity_i = 1;
42681 enum iio_chan_type type;
42682diff --git a/drivers/hwmon/nct6683.c b/drivers/hwmon/nct6683.c
42683index f3830db..9f4d6d5 100644
42684--- a/drivers/hwmon/nct6683.c
42685+++ b/drivers/hwmon/nct6683.c
42686@@ -397,11 +397,11 @@ static struct attribute_group *
42687 nct6683_create_attr_group(struct device *dev, struct sensor_template_group *tg,
42688 int repeat)
42689 {
42690- struct sensor_device_attribute_2 *a2;
42691- struct sensor_device_attribute *a;
42692+ sensor_device_attribute_2_no_const *a2;
42693+ sensor_device_attribute_no_const *a;
42694 struct sensor_device_template **t;
42695 struct sensor_device_attr_u *su;
42696- struct attribute_group *group;
42697+ attribute_group_no_const *group;
42698 struct attribute **attrs;
42699 int i, j, count;
42700
42701diff --git a/drivers/hwmon/nct6775.c b/drivers/hwmon/nct6775.c
42702index 1be4117..88ae1e1 100644
42703--- a/drivers/hwmon/nct6775.c
42704+++ b/drivers/hwmon/nct6775.c
42705@@ -952,10 +952,10 @@ static struct attribute_group *
42706 nct6775_create_attr_group(struct device *dev, struct sensor_template_group *tg,
42707 int repeat)
42708 {
42709- struct attribute_group *group;
42710+ attribute_group_no_const *group;
42711 struct sensor_device_attr_u *su;
42712- struct sensor_device_attribute *a;
42713- struct sensor_device_attribute_2 *a2;
42714+ sensor_device_attribute_no_const *a;
42715+ sensor_device_attribute_2_no_const *a2;
42716 struct attribute **attrs;
42717 struct sensor_device_template **t;
42718 int i, count;
42719diff --git a/drivers/hwmon/pmbus/pmbus_core.c b/drivers/hwmon/pmbus/pmbus_core.c
42720index f2e47c7..45d7941 100644
42721--- a/drivers/hwmon/pmbus/pmbus_core.c
42722+++ b/drivers/hwmon/pmbus/pmbus_core.c
42723@@ -816,7 +816,7 @@ static int pmbus_add_attribute(struct pmbus_data *data, struct attribute *attr)
42724 return 0;
42725 }
42726
42727-static void pmbus_dev_attr_init(struct device_attribute *dev_attr,
42728+static void pmbus_dev_attr_init(device_attribute_no_const *dev_attr,
42729 const char *name,
42730 umode_t mode,
42731 ssize_t (*show)(struct device *dev,
42732@@ -833,7 +833,7 @@ static void pmbus_dev_attr_init(struct device_attribute *dev_attr,
42733 dev_attr->store = store;
42734 }
42735
42736-static void pmbus_attr_init(struct sensor_device_attribute *a,
42737+static void pmbus_attr_init(sensor_device_attribute_no_const *a,
42738 const char *name,
42739 umode_t mode,
42740 ssize_t (*show)(struct device *dev,
42741@@ -855,7 +855,7 @@ static int pmbus_add_boolean(struct pmbus_data *data,
42742 u16 reg, u8 mask)
42743 {
42744 struct pmbus_boolean *boolean;
42745- struct sensor_device_attribute *a;
42746+ sensor_device_attribute_no_const *a;
42747
42748 boolean = devm_kzalloc(data->dev, sizeof(*boolean), GFP_KERNEL);
42749 if (!boolean)
42750@@ -880,7 +880,7 @@ static struct pmbus_sensor *pmbus_add_sensor(struct pmbus_data *data,
42751 bool update, bool readonly)
42752 {
42753 struct pmbus_sensor *sensor;
42754- struct device_attribute *a;
42755+ device_attribute_no_const *a;
42756
42757 sensor = devm_kzalloc(data->dev, sizeof(*sensor), GFP_KERNEL);
42758 if (!sensor)
42759@@ -911,7 +911,7 @@ static int pmbus_add_label(struct pmbus_data *data,
42760 const char *lstring, int index)
42761 {
42762 struct pmbus_label *label;
42763- struct device_attribute *a;
42764+ device_attribute_no_const *a;
42765
42766 label = devm_kzalloc(data->dev, sizeof(*label), GFP_KERNEL);
42767 if (!label)
42768diff --git a/drivers/hwmon/sht15.c b/drivers/hwmon/sht15.c
42769index d4f0935..7420593 100644
42770--- a/drivers/hwmon/sht15.c
42771+++ b/drivers/hwmon/sht15.c
42772@@ -169,7 +169,7 @@ struct sht15_data {
42773 int supply_uv;
42774 bool supply_uv_valid;
42775 struct work_struct update_supply_work;
42776- atomic_t interrupt_handled;
42777+ atomic_unchecked_t interrupt_handled;
42778 };
42779
42780 /**
42781@@ -542,13 +542,13 @@ static int sht15_measurement(struct sht15_data *data,
42782 ret = gpio_direction_input(data->pdata->gpio_data);
42783 if (ret)
42784 return ret;
42785- atomic_set(&data->interrupt_handled, 0);
42786+ atomic_set_unchecked(&data->interrupt_handled, 0);
42787
42788 enable_irq(gpio_to_irq(data->pdata->gpio_data));
42789 if (gpio_get_value(data->pdata->gpio_data) == 0) {
42790 disable_irq_nosync(gpio_to_irq(data->pdata->gpio_data));
42791 /* Only relevant if the interrupt hasn't occurred. */
42792- if (!atomic_read(&data->interrupt_handled))
42793+ if (!atomic_read_unchecked(&data->interrupt_handled))
42794 schedule_work(&data->read_work);
42795 }
42796 ret = wait_event_timeout(data->wait_queue,
42797@@ -820,7 +820,7 @@ static irqreturn_t sht15_interrupt_fired(int irq, void *d)
42798
42799 /* First disable the interrupt */
42800 disable_irq_nosync(irq);
42801- atomic_inc(&data->interrupt_handled);
42802+ atomic_inc_unchecked(&data->interrupt_handled);
42803 /* Then schedule a reading work struct */
42804 if (data->state != SHT15_READING_NOTHING)
42805 schedule_work(&data->read_work);
42806@@ -842,11 +842,11 @@ static void sht15_bh_read_data(struct work_struct *work_s)
42807 * If not, then start the interrupt again - care here as could
42808 * have gone low in meantime so verify it hasn't!
42809 */
42810- atomic_set(&data->interrupt_handled, 0);
42811+ atomic_set_unchecked(&data->interrupt_handled, 0);
42812 enable_irq(gpio_to_irq(data->pdata->gpio_data));
42813 /* If still not occurred or another handler was scheduled */
42814 if (gpio_get_value(data->pdata->gpio_data)
42815- || atomic_read(&data->interrupt_handled))
42816+ || atomic_read_unchecked(&data->interrupt_handled))
42817 return;
42818 }
42819
42820diff --git a/drivers/hwmon/via-cputemp.c b/drivers/hwmon/via-cputemp.c
42821index ac91c07..8e69663 100644
42822--- a/drivers/hwmon/via-cputemp.c
42823+++ b/drivers/hwmon/via-cputemp.c
42824@@ -295,7 +295,7 @@ static int via_cputemp_cpu_callback(struct notifier_block *nfb,
42825 return NOTIFY_OK;
42826 }
42827
42828-static struct notifier_block via_cputemp_cpu_notifier __refdata = {
42829+static struct notifier_block via_cputemp_cpu_notifier = {
42830 .notifier_call = via_cputemp_cpu_callback,
42831 };
42832
42833diff --git a/drivers/i2c/busses/i2c-amd756-s4882.c b/drivers/i2c/busses/i2c-amd756-s4882.c
42834index 65e3240..e6c511d 100644
42835--- a/drivers/i2c/busses/i2c-amd756-s4882.c
42836+++ b/drivers/i2c/busses/i2c-amd756-s4882.c
42837@@ -39,7 +39,7 @@
42838 extern struct i2c_adapter amd756_smbus;
42839
42840 static struct i2c_adapter *s4882_adapter;
42841-static struct i2c_algorithm *s4882_algo;
42842+static i2c_algorithm_no_const *s4882_algo;
42843
42844 /* Wrapper access functions for multiplexed SMBus */
42845 static DEFINE_MUTEX(amd756_lock);
42846diff --git a/drivers/i2c/busses/i2c-diolan-u2c.c b/drivers/i2c/busses/i2c-diolan-u2c.c
42847index b19a310..d6eece0 100644
42848--- a/drivers/i2c/busses/i2c-diolan-u2c.c
42849+++ b/drivers/i2c/busses/i2c-diolan-u2c.c
42850@@ -98,7 +98,7 @@ MODULE_PARM_DESC(frequency, "I2C clock frequency in hertz");
42851 /* usb layer */
42852
42853 /* Send command to device, and get response. */
42854-static int diolan_usb_transfer(struct i2c_diolan_u2c *dev)
42855+static int __intentional_overflow(-1) diolan_usb_transfer(struct i2c_diolan_u2c *dev)
42856 {
42857 int ret = 0;
42858 int actual;
42859diff --git a/drivers/i2c/busses/i2c-nforce2-s4985.c b/drivers/i2c/busses/i2c-nforce2-s4985.c
42860index 88eda09..cf40434 100644
42861--- a/drivers/i2c/busses/i2c-nforce2-s4985.c
42862+++ b/drivers/i2c/busses/i2c-nforce2-s4985.c
42863@@ -37,7 +37,7 @@
42864 extern struct i2c_adapter *nforce2_smbus;
42865
42866 static struct i2c_adapter *s4985_adapter;
42867-static struct i2c_algorithm *s4985_algo;
42868+static i2c_algorithm_no_const *s4985_algo;
42869
42870 /* Wrapper access functions for multiplexed SMBus */
42871 static DEFINE_MUTEX(nforce2_lock);
42872diff --git a/drivers/i2c/i2c-dev.c b/drivers/i2c/i2c-dev.c
42873index 71c7a39..71dd3e0 100644
42874--- a/drivers/i2c/i2c-dev.c
42875+++ b/drivers/i2c/i2c-dev.c
42876@@ -272,7 +272,7 @@ static noinline int i2cdev_ioctl_rdrw(struct i2c_client *client,
42877 break;
42878 }
42879
42880- data_ptrs[i] = (u8 __user *)rdwr_pa[i].buf;
42881+ data_ptrs[i] = (u8 __force_user *)rdwr_pa[i].buf;
42882 rdwr_pa[i].buf = memdup_user(data_ptrs[i], rdwr_pa[i].len);
42883 if (IS_ERR(rdwr_pa[i].buf)) {
42884 res = PTR_ERR(rdwr_pa[i].buf);
42885diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c
42886index 0b510ba..4fbb5085 100644
42887--- a/drivers/ide/ide-cd.c
42888+++ b/drivers/ide/ide-cd.c
42889@@ -768,7 +768,7 @@ static void cdrom_do_block_pc(ide_drive_t *drive, struct request *rq)
42890 alignment = queue_dma_alignment(q) | q->dma_pad_mask;
42891 if ((unsigned long)buf & alignment
42892 || blk_rq_bytes(rq) & q->dma_pad_mask
42893- || object_is_on_stack(buf))
42894+ || object_starts_on_stack(buf))
42895 drive->dma = 0;
42896 }
42897 }
42898diff --git a/drivers/iio/industrialio-core.c b/drivers/iio/industrialio-core.c
42899index 4df97f6..c751151 100644
42900--- a/drivers/iio/industrialio-core.c
42901+++ b/drivers/iio/industrialio-core.c
42902@@ -570,7 +570,7 @@ static ssize_t iio_write_channel_info(struct device *dev,
42903 }
42904
42905 static
42906-int __iio_device_attr_init(struct device_attribute *dev_attr,
42907+int __iio_device_attr_init(device_attribute_no_const *dev_attr,
42908 const char *postfix,
42909 struct iio_chan_spec const *chan,
42910 ssize_t (*readfunc)(struct device *dev,
42911diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
42912index e28a494..f7c2671 100644
42913--- a/drivers/infiniband/core/cm.c
42914+++ b/drivers/infiniband/core/cm.c
42915@@ -115,7 +115,7 @@ static char const counter_group_names[CM_COUNTER_GROUPS]
42916
42917 struct cm_counter_group {
42918 struct kobject obj;
42919- atomic_long_t counter[CM_ATTR_COUNT];
42920+ atomic_long_unchecked_t counter[CM_ATTR_COUNT];
42921 };
42922
42923 struct cm_counter_attribute {
42924@@ -1398,7 +1398,7 @@ static void cm_dup_req_handler(struct cm_work *work,
42925 struct ib_mad_send_buf *msg = NULL;
42926 int ret;
42927
42928- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
42929+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
42930 counter[CM_REQ_COUNTER]);
42931
42932 /* Quick state check to discard duplicate REQs. */
42933@@ -1785,7 +1785,7 @@ static void cm_dup_rep_handler(struct cm_work *work)
42934 if (!cm_id_priv)
42935 return;
42936
42937- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
42938+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
42939 counter[CM_REP_COUNTER]);
42940 ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg);
42941 if (ret)
42942@@ -1952,7 +1952,7 @@ static int cm_rtu_handler(struct cm_work *work)
42943 if (cm_id_priv->id.state != IB_CM_REP_SENT &&
42944 cm_id_priv->id.state != IB_CM_MRA_REP_RCVD) {
42945 spin_unlock_irq(&cm_id_priv->lock);
42946- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
42947+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
42948 counter[CM_RTU_COUNTER]);
42949 goto out;
42950 }
42951@@ -2135,7 +2135,7 @@ static int cm_dreq_handler(struct cm_work *work)
42952 cm_id_priv = cm_acquire_id(dreq_msg->remote_comm_id,
42953 dreq_msg->local_comm_id);
42954 if (!cm_id_priv) {
42955- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
42956+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
42957 counter[CM_DREQ_COUNTER]);
42958 cm_issue_drep(work->port, work->mad_recv_wc);
42959 return -EINVAL;
42960@@ -2160,7 +2160,7 @@ static int cm_dreq_handler(struct cm_work *work)
42961 case IB_CM_MRA_REP_RCVD:
42962 break;
42963 case IB_CM_TIMEWAIT:
42964- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
42965+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
42966 counter[CM_DREQ_COUNTER]);
42967 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
42968 goto unlock;
42969@@ -2174,7 +2174,7 @@ static int cm_dreq_handler(struct cm_work *work)
42970 cm_free_msg(msg);
42971 goto deref;
42972 case IB_CM_DREQ_RCVD:
42973- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
42974+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
42975 counter[CM_DREQ_COUNTER]);
42976 goto unlock;
42977 default:
42978@@ -2541,7 +2541,7 @@ static int cm_mra_handler(struct cm_work *work)
42979 ib_modify_mad(cm_id_priv->av.port->mad_agent,
42980 cm_id_priv->msg, timeout)) {
42981 if (cm_id_priv->id.lap_state == IB_CM_MRA_LAP_RCVD)
42982- atomic_long_inc(&work->port->
42983+ atomic_long_inc_unchecked(&work->port->
42984 counter_group[CM_RECV_DUPLICATES].
42985 counter[CM_MRA_COUNTER]);
42986 goto out;
42987@@ -2550,7 +2550,7 @@ static int cm_mra_handler(struct cm_work *work)
42988 break;
42989 case IB_CM_MRA_REQ_RCVD:
42990 case IB_CM_MRA_REP_RCVD:
42991- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
42992+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
42993 counter[CM_MRA_COUNTER]);
42994 /* fall through */
42995 default:
42996@@ -2712,7 +2712,7 @@ static int cm_lap_handler(struct cm_work *work)
42997 case IB_CM_LAP_IDLE:
42998 break;
42999 case IB_CM_MRA_LAP_SENT:
43000- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
43001+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
43002 counter[CM_LAP_COUNTER]);
43003 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
43004 goto unlock;
43005@@ -2728,7 +2728,7 @@ static int cm_lap_handler(struct cm_work *work)
43006 cm_free_msg(msg);
43007 goto deref;
43008 case IB_CM_LAP_RCVD:
43009- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
43010+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
43011 counter[CM_LAP_COUNTER]);
43012 goto unlock;
43013 default:
43014@@ -3012,7 +3012,7 @@ static int cm_sidr_req_handler(struct cm_work *work)
43015 cur_cm_id_priv = cm_insert_remote_sidr(cm_id_priv);
43016 if (cur_cm_id_priv) {
43017 spin_unlock_irq(&cm.lock);
43018- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
43019+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
43020 counter[CM_SIDR_REQ_COUNTER]);
43021 goto out; /* Duplicate message. */
43022 }
43023@@ -3224,10 +3224,10 @@ static void cm_send_handler(struct ib_mad_agent *mad_agent,
43024 if (!msg->context[0] && (attr_index != CM_REJ_COUNTER))
43025 msg->retries = 1;
43026
43027- atomic_long_add(1 + msg->retries,
43028+ atomic_long_add_unchecked(1 + msg->retries,
43029 &port->counter_group[CM_XMIT].counter[attr_index]);
43030 if (msg->retries)
43031- atomic_long_add(msg->retries,
43032+ atomic_long_add_unchecked(msg->retries,
43033 &port->counter_group[CM_XMIT_RETRIES].
43034 counter[attr_index]);
43035
43036@@ -3437,7 +3437,7 @@ static void cm_recv_handler(struct ib_mad_agent *mad_agent,
43037 }
43038
43039 attr_id = be16_to_cpu(mad_recv_wc->recv_buf.mad->mad_hdr.attr_id);
43040- atomic_long_inc(&port->counter_group[CM_RECV].
43041+ atomic_long_inc_unchecked(&port->counter_group[CM_RECV].
43042 counter[attr_id - CM_ATTR_ID_OFFSET]);
43043
43044 work = kmalloc(sizeof *work + sizeof(struct ib_sa_path_rec) * paths,
43045@@ -3668,7 +3668,7 @@ static ssize_t cm_show_counter(struct kobject *obj, struct attribute *attr,
43046 cm_attr = container_of(attr, struct cm_counter_attribute, attr);
43047
43048 return sprintf(buf, "%ld\n",
43049- atomic_long_read(&group->counter[cm_attr->index]));
43050+ atomic_long_read_unchecked(&group->counter[cm_attr->index]));
43051 }
43052
43053 static const struct sysfs_ops cm_counter_ops = {
43054diff --git a/drivers/infiniband/core/fmr_pool.c b/drivers/infiniband/core/fmr_pool.c
43055index 9f5ad7c..588cd84 100644
43056--- a/drivers/infiniband/core/fmr_pool.c
43057+++ b/drivers/infiniband/core/fmr_pool.c
43058@@ -98,8 +98,8 @@ struct ib_fmr_pool {
43059
43060 struct task_struct *thread;
43061
43062- atomic_t req_ser;
43063- atomic_t flush_ser;
43064+ atomic_unchecked_t req_ser;
43065+ atomic_unchecked_t flush_ser;
43066
43067 wait_queue_head_t force_wait;
43068 };
43069@@ -179,10 +179,10 @@ static int ib_fmr_cleanup_thread(void *pool_ptr)
43070 struct ib_fmr_pool *pool = pool_ptr;
43071
43072 do {
43073- if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) < 0) {
43074+ if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) < 0) {
43075 ib_fmr_batch_release(pool);
43076
43077- atomic_inc(&pool->flush_ser);
43078+ atomic_inc_unchecked(&pool->flush_ser);
43079 wake_up_interruptible(&pool->force_wait);
43080
43081 if (pool->flush_function)
43082@@ -190,7 +190,7 @@ static int ib_fmr_cleanup_thread(void *pool_ptr)
43083 }
43084
43085 set_current_state(TASK_INTERRUPTIBLE);
43086- if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) >= 0 &&
43087+ if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) >= 0 &&
43088 !kthread_should_stop())
43089 schedule();
43090 __set_current_state(TASK_RUNNING);
43091@@ -282,8 +282,8 @@ struct ib_fmr_pool *ib_create_fmr_pool(struct ib_pd *pd,
43092 pool->dirty_watermark = params->dirty_watermark;
43093 pool->dirty_len = 0;
43094 spin_lock_init(&pool->pool_lock);
43095- atomic_set(&pool->req_ser, 0);
43096- atomic_set(&pool->flush_ser, 0);
43097+ atomic_set_unchecked(&pool->req_ser, 0);
43098+ atomic_set_unchecked(&pool->flush_ser, 0);
43099 init_waitqueue_head(&pool->force_wait);
43100
43101 pool->thread = kthread_run(ib_fmr_cleanup_thread,
43102@@ -411,11 +411,11 @@ int ib_flush_fmr_pool(struct ib_fmr_pool *pool)
43103 }
43104 spin_unlock_irq(&pool->pool_lock);
43105
43106- serial = atomic_inc_return(&pool->req_ser);
43107+ serial = atomic_inc_return_unchecked(&pool->req_ser);
43108 wake_up_process(pool->thread);
43109
43110 if (wait_event_interruptible(pool->force_wait,
43111- atomic_read(&pool->flush_ser) - serial >= 0))
43112+ atomic_read_unchecked(&pool->flush_ser) - serial >= 0))
43113 return -EINTR;
43114
43115 return 0;
43116@@ -525,7 +525,7 @@ int ib_fmr_pool_unmap(struct ib_pool_fmr *fmr)
43117 } else {
43118 list_add_tail(&fmr->list, &pool->dirty_list);
43119 if (++pool->dirty_len >= pool->dirty_watermark) {
43120- atomic_inc(&pool->req_ser);
43121+ atomic_inc_unchecked(&pool->req_ser);
43122 wake_up_process(pool->thread);
43123 }
43124 }
43125diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c
43126index a9f0489..27a161b 100644
43127--- a/drivers/infiniband/core/uverbs_cmd.c
43128+++ b/drivers/infiniband/core/uverbs_cmd.c
43129@@ -951,6 +951,9 @@ ssize_t ib_uverbs_reg_mr(struct ib_uverbs_file *file,
43130 if (copy_from_user(&cmd, buf, sizeof cmd))
43131 return -EFAULT;
43132
43133+ if (!access_ok_noprefault(VERIFY_READ, cmd.start, cmd.length))
43134+ return -EFAULT;
43135+
43136 INIT_UDATA(&udata, buf + sizeof cmd,
43137 (unsigned long) cmd.response + sizeof resp,
43138 in_len - sizeof cmd, out_len - sizeof resp);
43139diff --git a/drivers/infiniband/hw/cxgb4/mem.c b/drivers/infiniband/hw/cxgb4/mem.c
43140index 6791fd1..78bdcdf 100644
43141--- a/drivers/infiniband/hw/cxgb4/mem.c
43142+++ b/drivers/infiniband/hw/cxgb4/mem.c
43143@@ -256,7 +256,7 @@ static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry,
43144 int err;
43145 struct fw_ri_tpte tpt;
43146 u32 stag_idx;
43147- static atomic_t key;
43148+ static atomic_unchecked_t key;
43149
43150 if (c4iw_fatal_error(rdev))
43151 return -EIO;
43152@@ -277,7 +277,7 @@ static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry,
43153 if (rdev->stats.stag.cur > rdev->stats.stag.max)
43154 rdev->stats.stag.max = rdev->stats.stag.cur;
43155 mutex_unlock(&rdev->stats.lock);
43156- *stag = (stag_idx << 8) | (atomic_inc_return(&key) & 0xff);
43157+ *stag = (stag_idx << 8) | (atomic_inc_return_unchecked(&key) & 0xff);
43158 }
43159 PDBG("%s stag_state 0x%0x type 0x%0x pdid 0x%0x, stag_idx 0x%x\n",
43160 __func__, stag_state, type, pdid, stag_idx);
43161diff --git a/drivers/infiniband/hw/ipath/ipath_rc.c b/drivers/infiniband/hw/ipath/ipath_rc.c
43162index 79b3dbc..96e5fcc 100644
43163--- a/drivers/infiniband/hw/ipath/ipath_rc.c
43164+++ b/drivers/infiniband/hw/ipath/ipath_rc.c
43165@@ -1868,7 +1868,7 @@ void ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
43166 struct ib_atomic_eth *ateth;
43167 struct ipath_ack_entry *e;
43168 u64 vaddr;
43169- atomic64_t *maddr;
43170+ atomic64_unchecked_t *maddr;
43171 u64 sdata;
43172 u32 rkey;
43173 u8 next;
43174@@ -1903,11 +1903,11 @@ void ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
43175 IB_ACCESS_REMOTE_ATOMIC)))
43176 goto nack_acc_unlck;
43177 /* Perform atomic OP and save result. */
43178- maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
43179+ maddr = (atomic64_unchecked_t *) qp->r_sge.sge.vaddr;
43180 sdata = be64_to_cpu(ateth->swap_data);
43181 e = &qp->s_ack_queue[qp->r_head_ack_queue];
43182 e->atomic_data = (opcode == OP(FETCH_ADD)) ?
43183- (u64) atomic64_add_return(sdata, maddr) - sdata :
43184+ (u64) atomic64_add_return_unchecked(sdata, maddr) - sdata :
43185 (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
43186 be64_to_cpu(ateth->compare_data),
43187 sdata);
43188diff --git a/drivers/infiniband/hw/ipath/ipath_ruc.c b/drivers/infiniband/hw/ipath/ipath_ruc.c
43189index 1f95bba..9530f87 100644
43190--- a/drivers/infiniband/hw/ipath/ipath_ruc.c
43191+++ b/drivers/infiniband/hw/ipath/ipath_ruc.c
43192@@ -266,7 +266,7 @@ static void ipath_ruc_loopback(struct ipath_qp *sqp)
43193 unsigned long flags;
43194 struct ib_wc wc;
43195 u64 sdata;
43196- atomic64_t *maddr;
43197+ atomic64_unchecked_t *maddr;
43198 enum ib_wc_status send_status;
43199
43200 /*
43201@@ -382,11 +382,11 @@ again:
43202 IB_ACCESS_REMOTE_ATOMIC)))
43203 goto acc_err;
43204 /* Perform atomic OP and save result. */
43205- maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
43206+ maddr = (atomic64_unchecked_t *) qp->r_sge.sge.vaddr;
43207 sdata = wqe->wr.wr.atomic.compare_add;
43208 *(u64 *) sqp->s_sge.sge.vaddr =
43209 (wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) ?
43210- (u64) atomic64_add_return(sdata, maddr) - sdata :
43211+ (u64) atomic64_add_return_unchecked(sdata, maddr) - sdata :
43212 (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
43213 sdata, wqe->wr.wr.atomic.swap);
43214 goto send_comp;
43215diff --git a/drivers/infiniband/hw/mlx4/mad.c b/drivers/infiniband/hw/mlx4/mad.c
43216index 5904026..f1c30e5 100644
43217--- a/drivers/infiniband/hw/mlx4/mad.c
43218+++ b/drivers/infiniband/hw/mlx4/mad.c
43219@@ -106,7 +106,7 @@ __be64 mlx4_ib_gen_node_guid(void)
43220
43221 __be64 mlx4_ib_get_new_demux_tid(struct mlx4_ib_demux_ctx *ctx)
43222 {
43223- return cpu_to_be64(atomic_inc_return(&ctx->tid)) |
43224+ return cpu_to_be64(atomic_inc_return_unchecked(&ctx->tid)) |
43225 cpu_to_be64(0xff00000000000000LL);
43226 }
43227
43228diff --git a/drivers/infiniband/hw/mlx4/mcg.c b/drivers/infiniband/hw/mlx4/mcg.c
43229index ed327e6..ca1739e0 100644
43230--- a/drivers/infiniband/hw/mlx4/mcg.c
43231+++ b/drivers/infiniband/hw/mlx4/mcg.c
43232@@ -1041,7 +1041,7 @@ int mlx4_ib_mcg_port_init(struct mlx4_ib_demux_ctx *ctx)
43233 {
43234 char name[20];
43235
43236- atomic_set(&ctx->tid, 0);
43237+ atomic_set_unchecked(&ctx->tid, 0);
43238 sprintf(name, "mlx4_ib_mcg%d", ctx->port);
43239 ctx->mcg_wq = create_singlethread_workqueue(name);
43240 if (!ctx->mcg_wq)
43241diff --git a/drivers/infiniband/hw/mlx4/mlx4_ib.h b/drivers/infiniband/hw/mlx4/mlx4_ib.h
43242index f829fd9..1a8d436 100644
43243--- a/drivers/infiniband/hw/mlx4/mlx4_ib.h
43244+++ b/drivers/infiniband/hw/mlx4/mlx4_ib.h
43245@@ -439,7 +439,7 @@ struct mlx4_ib_demux_ctx {
43246 struct list_head mcg_mgid0_list;
43247 struct workqueue_struct *mcg_wq;
43248 struct mlx4_ib_demux_pv_ctx **tun;
43249- atomic_t tid;
43250+ atomic_unchecked_t tid;
43251 int flushing; /* flushing the work queue */
43252 };
43253
43254diff --git a/drivers/infiniband/hw/mthca/mthca_cmd.c b/drivers/infiniband/hw/mthca/mthca_cmd.c
43255index 9d3e5c1..6f166df 100644
43256--- a/drivers/infiniband/hw/mthca/mthca_cmd.c
43257+++ b/drivers/infiniband/hw/mthca/mthca_cmd.c
43258@@ -772,7 +772,7 @@ static void mthca_setup_cmd_doorbells(struct mthca_dev *dev, u64 base)
43259 mthca_dbg(dev, "Mapped doorbell page for posting FW commands\n");
43260 }
43261
43262-int mthca_QUERY_FW(struct mthca_dev *dev)
43263+int __intentional_overflow(-1) mthca_QUERY_FW(struct mthca_dev *dev)
43264 {
43265 struct mthca_mailbox *mailbox;
43266 u32 *outbox;
43267@@ -1612,7 +1612,7 @@ int mthca_HW2SW_MPT(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
43268 CMD_TIME_CLASS_B);
43269 }
43270
43271-int mthca_WRITE_MTT(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
43272+int __intentional_overflow(-1) mthca_WRITE_MTT(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
43273 int num_mtt)
43274 {
43275 return mthca_cmd(dev, mailbox->dma, num_mtt, 0, CMD_WRITE_MTT,
43276@@ -1634,7 +1634,7 @@ int mthca_MAP_EQ(struct mthca_dev *dev, u64 event_mask, int unmap,
43277 0, CMD_MAP_EQ, CMD_TIME_CLASS_B);
43278 }
43279
43280-int mthca_SW2HW_EQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
43281+int __intentional_overflow(-1) mthca_SW2HW_EQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
43282 int eq_num)
43283 {
43284 return mthca_cmd(dev, mailbox->dma, eq_num, 0, CMD_SW2HW_EQ,
43285@@ -1857,7 +1857,7 @@ int mthca_CONF_SPECIAL_QP(struct mthca_dev *dev, int type, u32 qpn)
43286 CMD_TIME_CLASS_B);
43287 }
43288
43289-int mthca_MAD_IFC(struct mthca_dev *dev, int ignore_mkey, int ignore_bkey,
43290+int __intentional_overflow(-1) mthca_MAD_IFC(struct mthca_dev *dev, int ignore_mkey, int ignore_bkey,
43291 int port, struct ib_wc *in_wc, struct ib_grh *in_grh,
43292 void *in_mad, void *response_mad)
43293 {
43294diff --git a/drivers/infiniband/hw/mthca/mthca_main.c b/drivers/infiniband/hw/mthca/mthca_main.c
43295index ded76c1..0cf0a08 100644
43296--- a/drivers/infiniband/hw/mthca/mthca_main.c
43297+++ b/drivers/infiniband/hw/mthca/mthca_main.c
43298@@ -692,7 +692,7 @@ err_close:
43299 return err;
43300 }
43301
43302-static int mthca_setup_hca(struct mthca_dev *dev)
43303+static int __intentional_overflow(-1) mthca_setup_hca(struct mthca_dev *dev)
43304 {
43305 int err;
43306
43307diff --git a/drivers/infiniband/hw/mthca/mthca_mr.c b/drivers/infiniband/hw/mthca/mthca_mr.c
43308index ed9a989..6aa5dc2 100644
43309--- a/drivers/infiniband/hw/mthca/mthca_mr.c
43310+++ b/drivers/infiniband/hw/mthca/mthca_mr.c
43311@@ -81,7 +81,7 @@ struct mthca_mpt_entry {
43312 * through the bitmaps)
43313 */
43314
43315-static u32 mthca_buddy_alloc(struct mthca_buddy *buddy, int order)
43316+static u32 __intentional_overflow(-1) mthca_buddy_alloc(struct mthca_buddy *buddy, int order)
43317 {
43318 int o;
43319 int m;
43320@@ -426,7 +426,7 @@ static inline u32 adjust_key(struct mthca_dev *dev, u32 key)
43321 return key;
43322 }
43323
43324-int mthca_mr_alloc(struct mthca_dev *dev, u32 pd, int buffer_size_shift,
43325+int __intentional_overflow(-1) mthca_mr_alloc(struct mthca_dev *dev, u32 pd, int buffer_size_shift,
43326 u64 iova, u64 total_size, u32 access, struct mthca_mr *mr)
43327 {
43328 struct mthca_mailbox *mailbox;
43329@@ -516,7 +516,7 @@ int mthca_mr_alloc_notrans(struct mthca_dev *dev, u32 pd,
43330 return mthca_mr_alloc(dev, pd, 12, 0, ~0ULL, access, mr);
43331 }
43332
43333-int mthca_mr_alloc_phys(struct mthca_dev *dev, u32 pd,
43334+int __intentional_overflow(-1) mthca_mr_alloc_phys(struct mthca_dev *dev, u32 pd,
43335 u64 *buffer_list, int buffer_size_shift,
43336 int list_len, u64 iova, u64 total_size,
43337 u32 access, struct mthca_mr *mr)
43338diff --git a/drivers/infiniband/hw/mthca/mthca_provider.c b/drivers/infiniband/hw/mthca/mthca_provider.c
43339index 415f8e1..e34214e 100644
43340--- a/drivers/infiniband/hw/mthca/mthca_provider.c
43341+++ b/drivers/infiniband/hw/mthca/mthca_provider.c
43342@@ -764,7 +764,7 @@ unlock:
43343 return 0;
43344 }
43345
43346-static int mthca_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
43347+static int __intentional_overflow(-1) mthca_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
43348 {
43349 struct mthca_dev *dev = to_mdev(ibcq->device);
43350 struct mthca_cq *cq = to_mcq(ibcq);
43351diff --git a/drivers/infiniband/hw/nes/nes.c b/drivers/infiniband/hw/nes/nes.c
43352index 3b2a6dc..bce26ff 100644
43353--- a/drivers/infiniband/hw/nes/nes.c
43354+++ b/drivers/infiniband/hw/nes/nes.c
43355@@ -97,7 +97,7 @@ MODULE_PARM_DESC(limit_maxrdreqsz, "Limit max read request size to 256 Bytes");
43356 LIST_HEAD(nes_adapter_list);
43357 static LIST_HEAD(nes_dev_list);
43358
43359-atomic_t qps_destroyed;
43360+atomic_unchecked_t qps_destroyed;
43361
43362 static unsigned int ee_flsh_adapter;
43363 static unsigned int sysfs_nonidx_addr;
43364@@ -278,7 +278,7 @@ static void nes_cqp_rem_ref_callback(struct nes_device *nesdev, struct nes_cqp_r
43365 struct nes_qp *nesqp = cqp_request->cqp_callback_pointer;
43366 struct nes_adapter *nesadapter = nesdev->nesadapter;
43367
43368- atomic_inc(&qps_destroyed);
43369+ atomic_inc_unchecked(&qps_destroyed);
43370
43371 /* Free the control structures */
43372
43373diff --git a/drivers/infiniband/hw/nes/nes.h b/drivers/infiniband/hw/nes/nes.h
43374index bd9d132..70d84f4 100644
43375--- a/drivers/infiniband/hw/nes/nes.h
43376+++ b/drivers/infiniband/hw/nes/nes.h
43377@@ -180,17 +180,17 @@ extern unsigned int nes_debug_level;
43378 extern unsigned int wqm_quanta;
43379 extern struct list_head nes_adapter_list;
43380
43381-extern atomic_t cm_connects;
43382-extern atomic_t cm_accepts;
43383-extern atomic_t cm_disconnects;
43384-extern atomic_t cm_closes;
43385-extern atomic_t cm_connecteds;
43386-extern atomic_t cm_connect_reqs;
43387-extern atomic_t cm_rejects;
43388-extern atomic_t mod_qp_timouts;
43389-extern atomic_t qps_created;
43390-extern atomic_t qps_destroyed;
43391-extern atomic_t sw_qps_destroyed;
43392+extern atomic_unchecked_t cm_connects;
43393+extern atomic_unchecked_t cm_accepts;
43394+extern atomic_unchecked_t cm_disconnects;
43395+extern atomic_unchecked_t cm_closes;
43396+extern atomic_unchecked_t cm_connecteds;
43397+extern atomic_unchecked_t cm_connect_reqs;
43398+extern atomic_unchecked_t cm_rejects;
43399+extern atomic_unchecked_t mod_qp_timouts;
43400+extern atomic_unchecked_t qps_created;
43401+extern atomic_unchecked_t qps_destroyed;
43402+extern atomic_unchecked_t sw_qps_destroyed;
43403 extern u32 mh_detected;
43404 extern u32 mh_pauses_sent;
43405 extern u32 cm_packets_sent;
43406@@ -199,16 +199,16 @@ extern u32 cm_packets_created;
43407 extern u32 cm_packets_received;
43408 extern u32 cm_packets_dropped;
43409 extern u32 cm_packets_retrans;
43410-extern atomic_t cm_listens_created;
43411-extern atomic_t cm_listens_destroyed;
43412+extern atomic_unchecked_t cm_listens_created;
43413+extern atomic_unchecked_t cm_listens_destroyed;
43414 extern u32 cm_backlog_drops;
43415-extern atomic_t cm_loopbacks;
43416-extern atomic_t cm_nodes_created;
43417-extern atomic_t cm_nodes_destroyed;
43418-extern atomic_t cm_accel_dropped_pkts;
43419-extern atomic_t cm_resets_recvd;
43420-extern atomic_t pau_qps_created;
43421-extern atomic_t pau_qps_destroyed;
43422+extern atomic_unchecked_t cm_loopbacks;
43423+extern atomic_unchecked_t cm_nodes_created;
43424+extern atomic_unchecked_t cm_nodes_destroyed;
43425+extern atomic_unchecked_t cm_accel_dropped_pkts;
43426+extern atomic_unchecked_t cm_resets_recvd;
43427+extern atomic_unchecked_t pau_qps_created;
43428+extern atomic_unchecked_t pau_qps_destroyed;
43429
43430 extern u32 int_mod_timer_init;
43431 extern u32 int_mod_cq_depth_256;
43432diff --git a/drivers/infiniband/hw/nes/nes_cm.c b/drivers/infiniband/hw/nes/nes_cm.c
43433index 6f09a72..cf4399d 100644
43434--- a/drivers/infiniband/hw/nes/nes_cm.c
43435+++ b/drivers/infiniband/hw/nes/nes_cm.c
43436@@ -69,14 +69,14 @@ u32 cm_packets_dropped;
43437 u32 cm_packets_retrans;
43438 u32 cm_packets_created;
43439 u32 cm_packets_received;
43440-atomic_t cm_listens_created;
43441-atomic_t cm_listens_destroyed;
43442+atomic_unchecked_t cm_listens_created;
43443+atomic_unchecked_t cm_listens_destroyed;
43444 u32 cm_backlog_drops;
43445-atomic_t cm_loopbacks;
43446-atomic_t cm_nodes_created;
43447-atomic_t cm_nodes_destroyed;
43448-atomic_t cm_accel_dropped_pkts;
43449-atomic_t cm_resets_recvd;
43450+atomic_unchecked_t cm_loopbacks;
43451+atomic_unchecked_t cm_nodes_created;
43452+atomic_unchecked_t cm_nodes_destroyed;
43453+atomic_unchecked_t cm_accel_dropped_pkts;
43454+atomic_unchecked_t cm_resets_recvd;
43455
43456 static inline int mini_cm_accelerated(struct nes_cm_core *, struct nes_cm_node *);
43457 static struct nes_cm_listener *mini_cm_listen(struct nes_cm_core *, struct nes_vnic *, struct nes_cm_info *);
43458@@ -135,28 +135,28 @@ static void record_ird_ord(struct nes_cm_node *, u16, u16);
43459 /* instance of function pointers for client API */
43460 /* set address of this instance to cm_core->cm_ops at cm_core alloc */
43461 static struct nes_cm_ops nes_cm_api = {
43462- mini_cm_accelerated,
43463- mini_cm_listen,
43464- mini_cm_del_listen,
43465- mini_cm_connect,
43466- mini_cm_close,
43467- mini_cm_accept,
43468- mini_cm_reject,
43469- mini_cm_recv_pkt,
43470- mini_cm_dealloc_core,
43471- mini_cm_get,
43472- mini_cm_set
43473+ .accelerated = mini_cm_accelerated,
43474+ .listen = mini_cm_listen,
43475+ .stop_listener = mini_cm_del_listen,
43476+ .connect = mini_cm_connect,
43477+ .close = mini_cm_close,
43478+ .accept = mini_cm_accept,
43479+ .reject = mini_cm_reject,
43480+ .recv_pkt = mini_cm_recv_pkt,
43481+ .destroy_cm_core = mini_cm_dealloc_core,
43482+ .get = mini_cm_get,
43483+ .set = mini_cm_set
43484 };
43485
43486 static struct nes_cm_core *g_cm_core;
43487
43488-atomic_t cm_connects;
43489-atomic_t cm_accepts;
43490-atomic_t cm_disconnects;
43491-atomic_t cm_closes;
43492-atomic_t cm_connecteds;
43493-atomic_t cm_connect_reqs;
43494-atomic_t cm_rejects;
43495+atomic_unchecked_t cm_connects;
43496+atomic_unchecked_t cm_accepts;
43497+atomic_unchecked_t cm_disconnects;
43498+atomic_unchecked_t cm_closes;
43499+atomic_unchecked_t cm_connecteds;
43500+atomic_unchecked_t cm_connect_reqs;
43501+atomic_unchecked_t cm_rejects;
43502
43503 int nes_add_ref_cm_node(struct nes_cm_node *cm_node)
43504 {
43505@@ -1436,7 +1436,7 @@ static int mini_cm_dec_refcnt_listen(struct nes_cm_core *cm_core,
43506 kfree(listener);
43507 listener = NULL;
43508 ret = 0;
43509- atomic_inc(&cm_listens_destroyed);
43510+ atomic_inc_unchecked(&cm_listens_destroyed);
43511 } else {
43512 spin_unlock_irqrestore(&cm_core->listen_list_lock, flags);
43513 }
43514@@ -1637,7 +1637,7 @@ static struct nes_cm_node *make_cm_node(struct nes_cm_core *cm_core,
43515 cm_node->rem_mac);
43516
43517 add_hte_node(cm_core, cm_node);
43518- atomic_inc(&cm_nodes_created);
43519+ atomic_inc_unchecked(&cm_nodes_created);
43520
43521 return cm_node;
43522 }
43523@@ -1698,7 +1698,7 @@ static int rem_ref_cm_node(struct nes_cm_core *cm_core,
43524 }
43525
43526 atomic_dec(&cm_core->node_cnt);
43527- atomic_inc(&cm_nodes_destroyed);
43528+ atomic_inc_unchecked(&cm_nodes_destroyed);
43529 nesqp = cm_node->nesqp;
43530 if (nesqp) {
43531 nesqp->cm_node = NULL;
43532@@ -1762,7 +1762,7 @@ static int process_options(struct nes_cm_node *cm_node, u8 *optionsloc,
43533
43534 static void drop_packet(struct sk_buff *skb)
43535 {
43536- atomic_inc(&cm_accel_dropped_pkts);
43537+ atomic_inc_unchecked(&cm_accel_dropped_pkts);
43538 dev_kfree_skb_any(skb);
43539 }
43540
43541@@ -1825,7 +1825,7 @@ static void handle_rst_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb,
43542 {
43543
43544 int reset = 0; /* whether to send reset in case of err.. */
43545- atomic_inc(&cm_resets_recvd);
43546+ atomic_inc_unchecked(&cm_resets_recvd);
43547 nes_debug(NES_DBG_CM, "Received Reset, cm_node = %p, state = %u."
43548 " refcnt=%d\n", cm_node, cm_node->state,
43549 atomic_read(&cm_node->ref_count));
43550@@ -2492,7 +2492,7 @@ static struct nes_cm_node *mini_cm_connect(struct nes_cm_core *cm_core,
43551 rem_ref_cm_node(cm_node->cm_core, cm_node);
43552 return NULL;
43553 }
43554- atomic_inc(&cm_loopbacks);
43555+ atomic_inc_unchecked(&cm_loopbacks);
43556 loopbackremotenode->loopbackpartner = cm_node;
43557 loopbackremotenode->tcp_cntxt.rcv_wscale =
43558 NES_CM_DEFAULT_RCV_WND_SCALE;
43559@@ -2773,7 +2773,7 @@ static int mini_cm_recv_pkt(struct nes_cm_core *cm_core,
43560 nes_queue_mgt_skbs(skb, nesvnic, cm_node->nesqp);
43561 else {
43562 rem_ref_cm_node(cm_core, cm_node);
43563- atomic_inc(&cm_accel_dropped_pkts);
43564+ atomic_inc_unchecked(&cm_accel_dropped_pkts);
43565 dev_kfree_skb_any(skb);
43566 }
43567 break;
43568@@ -3081,7 +3081,7 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp)
43569
43570 if ((cm_id) && (cm_id->event_handler)) {
43571 if (issue_disconn) {
43572- atomic_inc(&cm_disconnects);
43573+ atomic_inc_unchecked(&cm_disconnects);
43574 cm_event.event = IW_CM_EVENT_DISCONNECT;
43575 cm_event.status = disconn_status;
43576 cm_event.local_addr = cm_id->local_addr;
43577@@ -3103,7 +3103,7 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp)
43578 }
43579
43580 if (issue_close) {
43581- atomic_inc(&cm_closes);
43582+ atomic_inc_unchecked(&cm_closes);
43583 nes_disconnect(nesqp, 1);
43584
43585 cm_id->provider_data = nesqp;
43586@@ -3241,7 +3241,7 @@ int nes_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
43587
43588 nes_debug(NES_DBG_CM, "QP%u, cm_node=%p, jiffies = %lu listener = %p\n",
43589 nesqp->hwqp.qp_id, cm_node, jiffies, cm_node->listener);
43590- atomic_inc(&cm_accepts);
43591+ atomic_inc_unchecked(&cm_accepts);
43592
43593 nes_debug(NES_DBG_CM, "netdev refcnt = %u.\n",
43594 netdev_refcnt_read(nesvnic->netdev));
43595@@ -3439,7 +3439,7 @@ int nes_reject(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
43596 struct nes_cm_core *cm_core;
43597 u8 *start_buff;
43598
43599- atomic_inc(&cm_rejects);
43600+ atomic_inc_unchecked(&cm_rejects);
43601 cm_node = (struct nes_cm_node *)cm_id->provider_data;
43602 loopback = cm_node->loopbackpartner;
43603 cm_core = cm_node->cm_core;
43604@@ -3504,7 +3504,7 @@ int nes_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
43605 ntohs(raddr->sin_port), ntohl(laddr->sin_addr.s_addr),
43606 ntohs(laddr->sin_port));
43607
43608- atomic_inc(&cm_connects);
43609+ atomic_inc_unchecked(&cm_connects);
43610 nesqp->active_conn = 1;
43611
43612 /* cache the cm_id in the qp */
43613@@ -3649,7 +3649,7 @@ int nes_create_listen(struct iw_cm_id *cm_id, int backlog)
43614 g_cm_core->api->stop_listener(g_cm_core, (void *)cm_node);
43615 return err;
43616 }
43617- atomic_inc(&cm_listens_created);
43618+ atomic_inc_unchecked(&cm_listens_created);
43619 }
43620
43621 cm_id->add_ref(cm_id);
43622@@ -3756,7 +3756,7 @@ static void cm_event_connected(struct nes_cm_event *event)
43623
43624 if (nesqp->destroyed)
43625 return;
43626- atomic_inc(&cm_connecteds);
43627+ atomic_inc_unchecked(&cm_connecteds);
43628 nes_debug(NES_DBG_CM, "QP%u attempting to connect to 0x%08X:0x%04X on"
43629 " local port 0x%04X. jiffies = %lu.\n",
43630 nesqp->hwqp.qp_id, ntohl(raddr->sin_addr.s_addr),
43631@@ -3941,7 +3941,7 @@ static void cm_event_reset(struct nes_cm_event *event)
43632
43633 cm_id->add_ref(cm_id);
43634 ret = cm_id->event_handler(cm_id, &cm_event);
43635- atomic_inc(&cm_closes);
43636+ atomic_inc_unchecked(&cm_closes);
43637 cm_event.event = IW_CM_EVENT_CLOSE;
43638 cm_event.status = 0;
43639 cm_event.provider_data = cm_id->provider_data;
43640@@ -3981,7 +3981,7 @@ static void cm_event_mpa_req(struct nes_cm_event *event)
43641 return;
43642 cm_id = cm_node->cm_id;
43643
43644- atomic_inc(&cm_connect_reqs);
43645+ atomic_inc_unchecked(&cm_connect_reqs);
43646 nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
43647 cm_node, cm_id, jiffies);
43648
43649@@ -4030,7 +4030,7 @@ static void cm_event_mpa_reject(struct nes_cm_event *event)
43650 return;
43651 cm_id = cm_node->cm_id;
43652
43653- atomic_inc(&cm_connect_reqs);
43654+ atomic_inc_unchecked(&cm_connect_reqs);
43655 nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
43656 cm_node, cm_id, jiffies);
43657
43658diff --git a/drivers/infiniband/hw/nes/nes_mgt.c b/drivers/infiniband/hw/nes/nes_mgt.c
43659index 4166452..fc952c3 100644
43660--- a/drivers/infiniband/hw/nes/nes_mgt.c
43661+++ b/drivers/infiniband/hw/nes/nes_mgt.c
43662@@ -40,8 +40,8 @@
43663 #include "nes.h"
43664 #include "nes_mgt.h"
43665
43666-atomic_t pau_qps_created;
43667-atomic_t pau_qps_destroyed;
43668+atomic_unchecked_t pau_qps_created;
43669+atomic_unchecked_t pau_qps_destroyed;
43670
43671 static void nes_replenish_mgt_rq(struct nes_vnic_mgt *mgtvnic)
43672 {
43673@@ -621,7 +621,7 @@ void nes_destroy_pau_qp(struct nes_device *nesdev, struct nes_qp *nesqp)
43674 {
43675 struct sk_buff *skb;
43676 unsigned long flags;
43677- atomic_inc(&pau_qps_destroyed);
43678+ atomic_inc_unchecked(&pau_qps_destroyed);
43679
43680 /* Free packets that have not yet been forwarded */
43681 /* Lock is acquired by skb_dequeue when removing the skb */
43682@@ -810,7 +810,7 @@ static void nes_mgt_ce_handler(struct nes_device *nesdev, struct nes_hw_nic_cq *
43683 cq->cq_vbase[head].cqe_words[NES_NIC_CQE_HASH_RCVNXT]);
43684 skb_queue_head_init(&nesqp->pau_list);
43685 spin_lock_init(&nesqp->pau_lock);
43686- atomic_inc(&pau_qps_created);
43687+ atomic_inc_unchecked(&pau_qps_created);
43688 nes_change_quad_hash(nesdev, mgtvnic->nesvnic, nesqp);
43689 }
43690
43691diff --git a/drivers/infiniband/hw/nes/nes_nic.c b/drivers/infiniband/hw/nes/nes_nic.c
43692index 70acda9..a96de9d 100644
43693--- a/drivers/infiniband/hw/nes/nes_nic.c
43694+++ b/drivers/infiniband/hw/nes/nes_nic.c
43695@@ -1274,39 +1274,39 @@ static void nes_netdev_get_ethtool_stats(struct net_device *netdev,
43696 target_stat_values[++index] = mh_detected;
43697 target_stat_values[++index] = mh_pauses_sent;
43698 target_stat_values[++index] = nesvnic->endnode_ipv4_tcp_retransmits;
43699- target_stat_values[++index] = atomic_read(&cm_connects);
43700- target_stat_values[++index] = atomic_read(&cm_accepts);
43701- target_stat_values[++index] = atomic_read(&cm_disconnects);
43702- target_stat_values[++index] = atomic_read(&cm_connecteds);
43703- target_stat_values[++index] = atomic_read(&cm_connect_reqs);
43704- target_stat_values[++index] = atomic_read(&cm_rejects);
43705- target_stat_values[++index] = atomic_read(&mod_qp_timouts);
43706- target_stat_values[++index] = atomic_read(&qps_created);
43707- target_stat_values[++index] = atomic_read(&sw_qps_destroyed);
43708- target_stat_values[++index] = atomic_read(&qps_destroyed);
43709- target_stat_values[++index] = atomic_read(&cm_closes);
43710+ target_stat_values[++index] = atomic_read_unchecked(&cm_connects);
43711+ target_stat_values[++index] = atomic_read_unchecked(&cm_accepts);
43712+ target_stat_values[++index] = atomic_read_unchecked(&cm_disconnects);
43713+ target_stat_values[++index] = atomic_read_unchecked(&cm_connecteds);
43714+ target_stat_values[++index] = atomic_read_unchecked(&cm_connect_reqs);
43715+ target_stat_values[++index] = atomic_read_unchecked(&cm_rejects);
43716+ target_stat_values[++index] = atomic_read_unchecked(&mod_qp_timouts);
43717+ target_stat_values[++index] = atomic_read_unchecked(&qps_created);
43718+ target_stat_values[++index] = atomic_read_unchecked(&sw_qps_destroyed);
43719+ target_stat_values[++index] = atomic_read_unchecked(&qps_destroyed);
43720+ target_stat_values[++index] = atomic_read_unchecked(&cm_closes);
43721 target_stat_values[++index] = cm_packets_sent;
43722 target_stat_values[++index] = cm_packets_bounced;
43723 target_stat_values[++index] = cm_packets_created;
43724 target_stat_values[++index] = cm_packets_received;
43725 target_stat_values[++index] = cm_packets_dropped;
43726 target_stat_values[++index] = cm_packets_retrans;
43727- target_stat_values[++index] = atomic_read(&cm_listens_created);
43728- target_stat_values[++index] = atomic_read(&cm_listens_destroyed);
43729+ target_stat_values[++index] = atomic_read_unchecked(&cm_listens_created);
43730+ target_stat_values[++index] = atomic_read_unchecked(&cm_listens_destroyed);
43731 target_stat_values[++index] = cm_backlog_drops;
43732- target_stat_values[++index] = atomic_read(&cm_loopbacks);
43733- target_stat_values[++index] = atomic_read(&cm_nodes_created);
43734- target_stat_values[++index] = atomic_read(&cm_nodes_destroyed);
43735- target_stat_values[++index] = atomic_read(&cm_accel_dropped_pkts);
43736- target_stat_values[++index] = atomic_read(&cm_resets_recvd);
43737+ target_stat_values[++index] = atomic_read_unchecked(&cm_loopbacks);
43738+ target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_created);
43739+ target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_destroyed);
43740+ target_stat_values[++index] = atomic_read_unchecked(&cm_accel_dropped_pkts);
43741+ target_stat_values[++index] = atomic_read_unchecked(&cm_resets_recvd);
43742 target_stat_values[++index] = nesadapter->free_4kpbl;
43743 target_stat_values[++index] = nesadapter->free_256pbl;
43744 target_stat_values[++index] = int_mod_timer_init;
43745 target_stat_values[++index] = nesvnic->lro_mgr.stats.aggregated;
43746 target_stat_values[++index] = nesvnic->lro_mgr.stats.flushed;
43747 target_stat_values[++index] = nesvnic->lro_mgr.stats.no_desc;
43748- target_stat_values[++index] = atomic_read(&pau_qps_created);
43749- target_stat_values[++index] = atomic_read(&pau_qps_destroyed);
43750+ target_stat_values[++index] = atomic_read_unchecked(&pau_qps_created);
43751+ target_stat_values[++index] = atomic_read_unchecked(&pau_qps_destroyed);
43752 }
43753
43754 /**
43755diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c
43756index c0d0296..3185f57 100644
43757--- a/drivers/infiniband/hw/nes/nes_verbs.c
43758+++ b/drivers/infiniband/hw/nes/nes_verbs.c
43759@@ -46,9 +46,9 @@
43760
43761 #include <rdma/ib_umem.h>
43762
43763-atomic_t mod_qp_timouts;
43764-atomic_t qps_created;
43765-atomic_t sw_qps_destroyed;
43766+atomic_unchecked_t mod_qp_timouts;
43767+atomic_unchecked_t qps_created;
43768+atomic_unchecked_t sw_qps_destroyed;
43769
43770 static void nes_unregister_ofa_device(struct nes_ib_device *nesibdev);
43771
43772@@ -1134,7 +1134,7 @@ static struct ib_qp *nes_create_qp(struct ib_pd *ibpd,
43773 if (init_attr->create_flags)
43774 return ERR_PTR(-EINVAL);
43775
43776- atomic_inc(&qps_created);
43777+ atomic_inc_unchecked(&qps_created);
43778 switch (init_attr->qp_type) {
43779 case IB_QPT_RC:
43780 if (nes_drv_opt & NES_DRV_OPT_NO_INLINE_DATA) {
43781@@ -1468,7 +1468,7 @@ static int nes_destroy_qp(struct ib_qp *ibqp)
43782 struct iw_cm_event cm_event;
43783 int ret = 0;
43784
43785- atomic_inc(&sw_qps_destroyed);
43786+ atomic_inc_unchecked(&sw_qps_destroyed);
43787 nesqp->destroyed = 1;
43788
43789 /* Blow away the connection if it exists. */
43790diff --git a/drivers/infiniband/hw/qib/qib.h b/drivers/infiniband/hw/qib/qib.h
43791index ffd48bf..83cdb56 100644
43792--- a/drivers/infiniband/hw/qib/qib.h
43793+++ b/drivers/infiniband/hw/qib/qib.h
43794@@ -52,6 +52,7 @@
43795 #include <linux/kref.h>
43796 #include <linux/sched.h>
43797 #include <linux/kthread.h>
43798+#include <linux/slab.h>
43799
43800 #include "qib_common.h"
43801 #include "qib_verbs.h"
43802diff --git a/drivers/infiniband/ulp/ipoib/ipoib_netlink.c b/drivers/infiniband/ulp/ipoib/ipoib_netlink.c
43803index cdc7df4..a2fdfdb 100644
43804--- a/drivers/infiniband/ulp/ipoib/ipoib_netlink.c
43805+++ b/drivers/infiniband/ulp/ipoib/ipoib_netlink.c
43806@@ -156,7 +156,7 @@ static size_t ipoib_get_size(const struct net_device *dev)
43807 nla_total_size(2); /* IFLA_IPOIB_UMCAST */
43808 }
43809
43810-static struct rtnl_link_ops ipoib_link_ops __read_mostly = {
43811+static struct rtnl_link_ops ipoib_link_ops = {
43812 .kind = "ipoib",
43813 .maxtype = IFLA_IPOIB_MAX,
43814 .policy = ipoib_policy,
43815diff --git a/drivers/input/gameport/gameport.c b/drivers/input/gameport/gameport.c
43816index e853a21..56fc5a8 100644
43817--- a/drivers/input/gameport/gameport.c
43818+++ b/drivers/input/gameport/gameport.c
43819@@ -527,14 +527,14 @@ EXPORT_SYMBOL(gameport_set_phys);
43820 */
43821 static void gameport_init_port(struct gameport *gameport)
43822 {
43823- static atomic_t gameport_no = ATOMIC_INIT(-1);
43824+ static atomic_unchecked_t gameport_no = ATOMIC_INIT(-1);
43825
43826 __module_get(THIS_MODULE);
43827
43828 mutex_init(&gameport->drv_mutex);
43829 device_initialize(&gameport->dev);
43830 dev_set_name(&gameport->dev, "gameport%lu",
43831- (unsigned long)atomic_inc_return(&gameport_no));
43832+ (unsigned long)atomic_inc_return_unchecked(&gameport_no));
43833 gameport->dev.bus = &gameport_bus;
43834 gameport->dev.release = gameport_release_port;
43835 if (gameport->parent)
43836diff --git a/drivers/input/input.c b/drivers/input/input.c
43837index cc357f1..ee42fbc 100644
43838--- a/drivers/input/input.c
43839+++ b/drivers/input/input.c
43840@@ -1781,7 +1781,7 @@ EXPORT_SYMBOL_GPL(input_class);
43841 */
43842 struct input_dev *input_allocate_device(void)
43843 {
43844- static atomic_t input_no = ATOMIC_INIT(-1);
43845+ static atomic_unchecked_t input_no = ATOMIC_INIT(-1);
43846 struct input_dev *dev;
43847
43848 dev = kzalloc(sizeof(struct input_dev), GFP_KERNEL);
43849@@ -1796,7 +1796,7 @@ struct input_dev *input_allocate_device(void)
43850 INIT_LIST_HEAD(&dev->node);
43851
43852 dev_set_name(&dev->dev, "input%lu",
43853- (unsigned long)atomic_inc_return(&input_no));
43854+ (unsigned long)atomic_inc_return_unchecked(&input_no));
43855
43856 __module_get(THIS_MODULE);
43857 }
43858diff --git a/drivers/input/joystick/sidewinder.c b/drivers/input/joystick/sidewinder.c
43859index 4a95b22..874c182 100644
43860--- a/drivers/input/joystick/sidewinder.c
43861+++ b/drivers/input/joystick/sidewinder.c
43862@@ -30,6 +30,7 @@
43863 #include <linux/kernel.h>
43864 #include <linux/module.h>
43865 #include <linux/slab.h>
43866+#include <linux/sched.h>
43867 #include <linux/input.h>
43868 #include <linux/gameport.h>
43869 #include <linux/jiffies.h>
43870diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
43871index 3aa2f3f..53c00ea 100644
43872--- a/drivers/input/joystick/xpad.c
43873+++ b/drivers/input/joystick/xpad.c
43874@@ -886,7 +886,7 @@ static void xpad_led_set(struct led_classdev *led_cdev,
43875
43876 static int xpad_led_probe(struct usb_xpad *xpad)
43877 {
43878- static atomic_t led_seq = ATOMIC_INIT(-1);
43879+ static atomic_unchecked_t led_seq = ATOMIC_INIT(-1);
43880 unsigned long led_no;
43881 struct xpad_led *led;
43882 struct led_classdev *led_cdev;
43883@@ -899,7 +899,7 @@ static int xpad_led_probe(struct usb_xpad *xpad)
43884 if (!led)
43885 return -ENOMEM;
43886
43887- led_no = atomic_inc_return(&led_seq);
43888+ led_no = atomic_inc_return_unchecked(&led_seq);
43889
43890 snprintf(led->name, sizeof(led->name), "xpad%lu", led_no);
43891 led->xpad = xpad;
43892diff --git a/drivers/input/misc/ims-pcu.c b/drivers/input/misc/ims-pcu.c
43893index ac1fa5f..5f7502c 100644
43894--- a/drivers/input/misc/ims-pcu.c
43895+++ b/drivers/input/misc/ims-pcu.c
43896@@ -1851,7 +1851,7 @@ static int ims_pcu_identify_type(struct ims_pcu *pcu, u8 *device_id)
43897
43898 static int ims_pcu_init_application_mode(struct ims_pcu *pcu)
43899 {
43900- static atomic_t device_no = ATOMIC_INIT(-1);
43901+ static atomic_unchecked_t device_no = ATOMIC_INIT(-1);
43902
43903 const struct ims_pcu_device_info *info;
43904 int error;
43905@@ -1882,7 +1882,7 @@ static int ims_pcu_init_application_mode(struct ims_pcu *pcu)
43906 }
43907
43908 /* Device appears to be operable, complete initialization */
43909- pcu->device_no = atomic_inc_return(&device_no);
43910+ pcu->device_no = atomic_inc_return_unchecked(&device_no);
43911
43912 /*
43913 * PCU-B devices, both GEN_1 and GEN_2 do not have OFN sensor
43914diff --git a/drivers/input/mouse/psmouse.h b/drivers/input/mouse/psmouse.h
43915index d02e1bd..d719719 100644
43916--- a/drivers/input/mouse/psmouse.h
43917+++ b/drivers/input/mouse/psmouse.h
43918@@ -124,7 +124,7 @@ struct psmouse_attribute {
43919 ssize_t (*set)(struct psmouse *psmouse, void *data,
43920 const char *buf, size_t count);
43921 bool protect;
43922-};
43923+} __do_const;
43924 #define to_psmouse_attr(a) container_of((a), struct psmouse_attribute, dattr)
43925
43926 ssize_t psmouse_attr_show_helper(struct device *dev, struct device_attribute *attr,
43927diff --git a/drivers/input/mousedev.c b/drivers/input/mousedev.c
43928index b604564..3f14ae4 100644
43929--- a/drivers/input/mousedev.c
43930+++ b/drivers/input/mousedev.c
43931@@ -744,7 +744,7 @@ static ssize_t mousedev_read(struct file *file, char __user *buffer,
43932
43933 spin_unlock_irq(&client->packet_lock);
43934
43935- if (copy_to_user(buffer, data, count))
43936+ if (count > sizeof(data) || copy_to_user(buffer, data, count))
43937 return -EFAULT;
43938
43939 return count;
43940diff --git a/drivers/input/serio/serio.c b/drivers/input/serio/serio.c
43941index a05a517..323a2fd 100644
43942--- a/drivers/input/serio/serio.c
43943+++ b/drivers/input/serio/serio.c
43944@@ -514,7 +514,7 @@ static void serio_release_port(struct device *dev)
43945 */
43946 static void serio_init_port(struct serio *serio)
43947 {
43948- static atomic_t serio_no = ATOMIC_INIT(-1);
43949+ static atomic_unchecked_t serio_no = ATOMIC_INIT(-1);
43950
43951 __module_get(THIS_MODULE);
43952
43953@@ -525,7 +525,7 @@ static void serio_init_port(struct serio *serio)
43954 mutex_init(&serio->drv_mutex);
43955 device_initialize(&serio->dev);
43956 dev_set_name(&serio->dev, "serio%lu",
43957- (unsigned long)atomic_inc_return(&serio_no));
43958+ (unsigned long)atomic_inc_return_unchecked(&serio_no));
43959 serio->dev.bus = &serio_bus;
43960 serio->dev.release = serio_release_port;
43961 serio->dev.groups = serio_device_attr_groups;
43962diff --git a/drivers/input/serio/serio_raw.c b/drivers/input/serio/serio_raw.c
43963index 71ef5d6..93380a9 100644
43964--- a/drivers/input/serio/serio_raw.c
43965+++ b/drivers/input/serio/serio_raw.c
43966@@ -292,7 +292,7 @@ static irqreturn_t serio_raw_interrupt(struct serio *serio, unsigned char data,
43967
43968 static int serio_raw_connect(struct serio *serio, struct serio_driver *drv)
43969 {
43970- static atomic_t serio_raw_no = ATOMIC_INIT(-1);
43971+ static atomic_unchecked_t serio_raw_no = ATOMIC_INIT(-1);
43972 struct serio_raw *serio_raw;
43973 int err;
43974
43975@@ -303,7 +303,7 @@ static int serio_raw_connect(struct serio *serio, struct serio_driver *drv)
43976 }
43977
43978 snprintf(serio_raw->name, sizeof(serio_raw->name),
43979- "serio_raw%ld", (long)atomic_inc_return(&serio_raw_no));
43980+ "serio_raw%ld", (long)atomic_inc_return_unchecked(&serio_raw_no));
43981 kref_init(&serio_raw->kref);
43982 INIT_LIST_HEAD(&serio_raw->client_list);
43983 init_waitqueue_head(&serio_raw->wait);
43984diff --git a/drivers/input/touchscreen/htcpen.c b/drivers/input/touchscreen/htcpen.c
43985index 92e2243..8fd9092 100644
43986--- a/drivers/input/touchscreen/htcpen.c
43987+++ b/drivers/input/touchscreen/htcpen.c
43988@@ -219,7 +219,7 @@ static struct isa_driver htcpen_isa_driver = {
43989 }
43990 };
43991
43992-static struct dmi_system_id htcshift_dmi_table[] __initdata = {
43993+static const struct dmi_system_id htcshift_dmi_table[] __initconst = {
43994 {
43995 .ident = "Shift",
43996 .matches = {
43997diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
43998index 48882c1..93e0987 100644
43999--- a/drivers/iommu/amd_iommu.c
44000+++ b/drivers/iommu/amd_iommu.c
44001@@ -823,11 +823,21 @@ static void copy_cmd_to_buffer(struct amd_iommu *iommu,
44002
44003 static void build_completion_wait(struct iommu_cmd *cmd, u64 address)
44004 {
44005+ phys_addr_t physaddr;
44006 WARN_ON(address & 0x7ULL);
44007
44008 memset(cmd, 0, sizeof(*cmd));
44009- cmd->data[0] = lower_32_bits(__pa(address)) | CMD_COMPL_WAIT_STORE_MASK;
44010- cmd->data[1] = upper_32_bits(__pa(address));
44011+
44012+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
44013+ if (object_starts_on_stack((void *)address)) {
44014+ void *adjbuf = (void *)address - current->stack + current->lowmem_stack;
44015+ physaddr = __pa((u64)adjbuf);
44016+ } else
44017+#endif
44018+ physaddr = __pa(address);
44019+
44020+ cmd->data[0] = lower_32_bits(physaddr) | CMD_COMPL_WAIT_STORE_MASK;
44021+ cmd->data[1] = upper_32_bits(physaddr);
44022 cmd->data[2] = 1;
44023 CMD_SET_TYPE(cmd, CMD_COMPL_WAIT);
44024 }
44025diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c
44026index a3adde6..988ee96 100644
44027--- a/drivers/iommu/arm-smmu.c
44028+++ b/drivers/iommu/arm-smmu.c
44029@@ -338,7 +338,7 @@ enum arm_smmu_domain_stage {
44030
44031 struct arm_smmu_domain {
44032 struct arm_smmu_device *smmu;
44033- struct io_pgtable_ops *pgtbl_ops;
44034+ struct io_pgtable *pgtbl;
44035 spinlock_t pgtbl_lock;
44036 struct arm_smmu_cfg cfg;
44037 enum arm_smmu_domain_stage stage;
44038@@ -833,7 +833,7 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
44039 {
44040 int irq, start, ret = 0;
44041 unsigned long ias, oas;
44042- struct io_pgtable_ops *pgtbl_ops;
44043+ struct io_pgtable *pgtbl;
44044 struct io_pgtable_cfg pgtbl_cfg;
44045 enum io_pgtable_fmt fmt;
44046 struct arm_smmu_domain *smmu_domain = domain->priv;
44047@@ -918,14 +918,16 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
44048 };
44049
44050 smmu_domain->smmu = smmu;
44051- pgtbl_ops = alloc_io_pgtable_ops(fmt, &pgtbl_cfg, smmu_domain);
44052- if (!pgtbl_ops) {
44053+ pgtbl = alloc_io_pgtable(fmt, &pgtbl_cfg, smmu_domain);
44054+ if (!pgtbl) {
44055 ret = -ENOMEM;
44056 goto out_clear_smmu;
44057 }
44058
44059 /* Update our support page sizes to reflect the page table format */
44060- arm_smmu_ops.pgsize_bitmap = pgtbl_cfg.pgsize_bitmap;
44061+ pax_open_kernel();
44062+ *(unsigned long *)&arm_smmu_ops.pgsize_bitmap = pgtbl_cfg.pgsize_bitmap;
44063+ pax_close_kernel();
44064
44065 /* Initialise the context bank with our page table cfg */
44066 arm_smmu_init_context_bank(smmu_domain, &pgtbl_cfg);
44067@@ -946,7 +948,7 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
44068 mutex_unlock(&smmu_domain->init_mutex);
44069
44070 /* Publish page table ops for map/unmap */
44071- smmu_domain->pgtbl_ops = pgtbl_ops;
44072+ smmu_domain->pgtbl = pgtbl;
44073 return 0;
44074
44075 out_clear_smmu:
44076@@ -979,8 +981,7 @@ static void arm_smmu_destroy_domain_context(struct iommu_domain *domain)
44077 free_irq(irq, domain);
44078 }
44079
44080- if (smmu_domain->pgtbl_ops)
44081- free_io_pgtable_ops(smmu_domain->pgtbl_ops);
44082+ free_io_pgtable(smmu_domain->pgtbl);
44083
44084 __arm_smmu_free_bitmap(smmu->context_map, cfg->cbndx);
44085 }
44086@@ -1204,13 +1205,13 @@ static int arm_smmu_map(struct iommu_domain *domain, unsigned long iova,
44087 int ret;
44088 unsigned long flags;
44089 struct arm_smmu_domain *smmu_domain = domain->priv;
44090- struct io_pgtable_ops *ops= smmu_domain->pgtbl_ops;
44091+ struct io_pgtable *iop = smmu_domain->pgtbl;
44092
44093- if (!ops)
44094+ if (!iop)
44095 return -ENODEV;
44096
44097 spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
44098- ret = ops->map(ops, iova, paddr, size, prot);
44099+ ret = iop->ops->map(iop, iova, paddr, size, prot);
44100 spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
44101 return ret;
44102 }
44103@@ -1221,13 +1222,13 @@ static size_t arm_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
44104 size_t ret;
44105 unsigned long flags;
44106 struct arm_smmu_domain *smmu_domain = domain->priv;
44107- struct io_pgtable_ops *ops= smmu_domain->pgtbl_ops;
44108+ struct io_pgtable *iop = smmu_domain->pgtbl;
44109
44110- if (!ops)
44111+ if (!iop)
44112 return 0;
44113
44114 spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
44115- ret = ops->unmap(ops, iova, size);
44116+ ret = iop->ops->unmap(iop, iova, size);
44117 spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
44118 return ret;
44119 }
44120@@ -1238,7 +1239,7 @@ static phys_addr_t arm_smmu_iova_to_phys_hard(struct iommu_domain *domain,
44121 struct arm_smmu_domain *smmu_domain = domain->priv;
44122 struct arm_smmu_device *smmu = smmu_domain->smmu;
44123 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
44124- struct io_pgtable_ops *ops= smmu_domain->pgtbl_ops;
44125+ struct io_pgtable *iop = smmu_domain->pgtbl;
44126 struct device *dev = smmu->dev;
44127 void __iomem *cb_base;
44128 u32 tmp;
44129@@ -1261,7 +1262,7 @@ static phys_addr_t arm_smmu_iova_to_phys_hard(struct iommu_domain *domain,
44130 dev_err(dev,
44131 "iova to phys timed out on 0x%pad. Falling back to software table walk.\n",
44132 &iova);
44133- return ops->iova_to_phys(ops, iova);
44134+ return iop->ops->iova_to_phys(iop, iova);
44135 }
44136
44137 phys = readl_relaxed(cb_base + ARM_SMMU_CB_PAR_LO);
44138@@ -1282,9 +1283,9 @@ static phys_addr_t arm_smmu_iova_to_phys(struct iommu_domain *domain,
44139 phys_addr_t ret;
44140 unsigned long flags;
44141 struct arm_smmu_domain *smmu_domain = domain->priv;
44142- struct io_pgtable_ops *ops= smmu_domain->pgtbl_ops;
44143+ struct io_pgtable *iop = smmu_domain->pgtbl;
44144
44145- if (!ops)
44146+ if (!iop)
44147 return 0;
44148
44149 spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
44150@@ -1292,7 +1293,7 @@ static phys_addr_t arm_smmu_iova_to_phys(struct iommu_domain *domain,
44151 smmu_domain->stage == ARM_SMMU_DOMAIN_S1) {
44152 ret = arm_smmu_iova_to_phys_hard(domain, iova);
44153 } else {
44154- ret = ops->iova_to_phys(ops, iova);
44155+ ret = iop->ops->iova_to_phys(iop, iova);
44156 }
44157
44158 spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
44159@@ -1651,7 +1652,9 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
44160 size |= SZ_64K | SZ_512M;
44161 }
44162
44163- arm_smmu_ops.pgsize_bitmap &= size;
44164+ pax_open_kernel();
44165+ *(unsigned long *)&arm_smmu_ops.pgsize_bitmap &= size;
44166+ pax_close_kernel();
44167 dev_notice(smmu->dev, "\tSupported page sizes: 0x%08lx\n", size);
44168
44169 if (smmu->features & ARM_SMMU_FEAT_TRANS_S1)
44170diff --git a/drivers/iommu/io-pgtable-arm.c b/drivers/iommu/io-pgtable-arm.c
44171index b610a8d..08eb879 100644
44172--- a/drivers/iommu/io-pgtable-arm.c
44173+++ b/drivers/iommu/io-pgtable-arm.c
44174@@ -36,12 +36,6 @@
44175 #define io_pgtable_to_data(x) \
44176 container_of((x), struct arm_lpae_io_pgtable, iop)
44177
44178-#define io_pgtable_ops_to_pgtable(x) \
44179- container_of((x), struct io_pgtable, ops)
44180-
44181-#define io_pgtable_ops_to_data(x) \
44182- io_pgtable_to_data(io_pgtable_ops_to_pgtable(x))
44183-
44184 /*
44185 * For consistency with the architecture, we always consider
44186 * ARM_LPAE_MAX_LEVELS levels, with the walk starting at level n >=0
44187@@ -302,10 +296,10 @@ static arm_lpae_iopte arm_lpae_prot_to_pte(struct arm_lpae_io_pgtable *data,
44188 return pte;
44189 }
44190
44191-static int arm_lpae_map(struct io_pgtable_ops *ops, unsigned long iova,
44192+static int arm_lpae_map(struct io_pgtable *iop, unsigned long iova,
44193 phys_addr_t paddr, size_t size, int iommu_prot)
44194 {
44195- struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
44196+ struct arm_lpae_io_pgtable *data = io_pgtable_to_data(iop);
44197 arm_lpae_iopte *ptep = data->pgd;
44198 int lvl = ARM_LPAE_START_LVL(data);
44199 arm_lpae_iopte prot;
44200@@ -445,12 +439,11 @@ static int __arm_lpae_unmap(struct arm_lpae_io_pgtable *data,
44201 return __arm_lpae_unmap(data, iova, size, lvl + 1, ptep);
44202 }
44203
44204-static int arm_lpae_unmap(struct io_pgtable_ops *ops, unsigned long iova,
44205+static int arm_lpae_unmap(struct io_pgtable *iop, unsigned long iova,
44206 size_t size)
44207 {
44208 size_t unmapped;
44209- struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
44210- struct io_pgtable *iop = &data->iop;
44211+ struct arm_lpae_io_pgtable *data = io_pgtable_to_data(iop);
44212 arm_lpae_iopte *ptep = data->pgd;
44213 int lvl = ARM_LPAE_START_LVL(data);
44214
44215@@ -461,10 +454,10 @@ static int arm_lpae_unmap(struct io_pgtable_ops *ops, unsigned long iova,
44216 return unmapped;
44217 }
44218
44219-static phys_addr_t arm_lpae_iova_to_phys(struct io_pgtable_ops *ops,
44220+static phys_addr_t arm_lpae_iova_to_phys(struct io_pgtable *iop,
44221 unsigned long iova)
44222 {
44223- struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
44224+ struct arm_lpae_io_pgtable *data = io_pgtable_to_data(iop);
44225 arm_lpae_iopte pte, *ptep = data->pgd;
44226 int lvl = ARM_LPAE_START_LVL(data);
44227
44228@@ -531,6 +524,12 @@ static void arm_lpae_restrict_pgsizes(struct io_pgtable_cfg *cfg)
44229 }
44230 }
44231
44232+static struct io_pgtable_ops arm_lpae_io_pgtable_ops = {
44233+ .map = arm_lpae_map,
44234+ .unmap = arm_lpae_unmap,
44235+ .iova_to_phys = arm_lpae_iova_to_phys,
44236+};
44237+
44238 static struct arm_lpae_io_pgtable *
44239 arm_lpae_alloc_pgtable(struct io_pgtable_cfg *cfg)
44240 {
44241@@ -562,11 +561,7 @@ arm_lpae_alloc_pgtable(struct io_pgtable_cfg *cfg)
44242 pgd_bits = va_bits - (data->bits_per_level * (data->levels - 1));
44243 data->pgd_size = 1UL << (pgd_bits + ilog2(sizeof(arm_lpae_iopte)));
44244
44245- data->iop.ops = (struct io_pgtable_ops) {
44246- .map = arm_lpae_map,
44247- .unmap = arm_lpae_unmap,
44248- .iova_to_phys = arm_lpae_iova_to_phys,
44249- };
44250+ data->iop.ops = &arm_lpae_io_pgtable_ops;
44251
44252 return data;
44253 }
44254@@ -825,9 +820,9 @@ static struct iommu_gather_ops dummy_tlb_ops __initdata = {
44255 .flush_pgtable = dummy_flush_pgtable,
44256 };
44257
44258-static void __init arm_lpae_dump_ops(struct io_pgtable_ops *ops)
44259+static void __init arm_lpae_dump_ops(struct io_pgtable *iop)
44260 {
44261- struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
44262+ struct arm_lpae_io_pgtable *data = io_pgtable_to_data(iop);
44263 struct io_pgtable_cfg *cfg = &data->iop.cfg;
44264
44265 pr_err("cfg: pgsize_bitmap 0x%lx, ias %u-bit\n",
44266@@ -837,9 +832,9 @@ static void __init arm_lpae_dump_ops(struct io_pgtable_ops *ops)
44267 data->bits_per_level, data->pgd);
44268 }
44269
44270-#define __FAIL(ops, i) ({ \
44271+#define __FAIL(iop, i) ({ \
44272 WARN(1, "selftest: test failed for fmt idx %d\n", (i)); \
44273- arm_lpae_dump_ops(ops); \
44274+ arm_lpae_dump_ops(iop); \
44275 selftest_running = false; \
44276 -EFAULT; \
44277 })
44278@@ -854,30 +849,32 @@ static int __init arm_lpae_run_tests(struct io_pgtable_cfg *cfg)
44279 int i, j;
44280 unsigned long iova;
44281 size_t size;
44282- struct io_pgtable_ops *ops;
44283+ struct io_pgtable *iop;
44284+ const struct io_pgtable_ops *ops;
44285
44286 selftest_running = true;
44287
44288 for (i = 0; i < ARRAY_SIZE(fmts); ++i) {
44289 cfg_cookie = cfg;
44290- ops = alloc_io_pgtable_ops(fmts[i], cfg, cfg);
44291- if (!ops) {
44292+ iop = alloc_io_pgtable(fmts[i], cfg, cfg);
44293+ if (!iop) {
44294 pr_err("selftest: failed to allocate io pgtable ops\n");
44295 return -ENOMEM;
44296 }
44297+ ops = iop->ops;
44298
44299 /*
44300 * Initial sanity checks.
44301 * Empty page tables shouldn't provide any translations.
44302 */
44303- if (ops->iova_to_phys(ops, 42))
44304- return __FAIL(ops, i);
44305+ if (ops->iova_to_phys(iop, 42))
44306+ return __FAIL(iop, i);
44307
44308- if (ops->iova_to_phys(ops, SZ_1G + 42))
44309- return __FAIL(ops, i);
44310+ if (ops->iova_to_phys(iop, SZ_1G + 42))
44311+ return __FAIL(iop, i);
44312
44313- if (ops->iova_to_phys(ops, SZ_2G + 42))
44314- return __FAIL(ops, i);
44315+ if (ops->iova_to_phys(iop, SZ_2G + 42))
44316+ return __FAIL(iop, i);
44317
44318 /*
44319 * Distinct mappings of different granule sizes.
44320@@ -887,19 +884,19 @@ static int __init arm_lpae_run_tests(struct io_pgtable_cfg *cfg)
44321 while (j != BITS_PER_LONG) {
44322 size = 1UL << j;
44323
44324- if (ops->map(ops, iova, iova, size, IOMMU_READ |
44325+ if (ops->map(iop, iova, iova, size, IOMMU_READ |
44326 IOMMU_WRITE |
44327 IOMMU_NOEXEC |
44328 IOMMU_CACHE))
44329- return __FAIL(ops, i);
44330+ return __FAIL(iop, i);
44331
44332 /* Overlapping mappings */
44333- if (!ops->map(ops, iova, iova + size, size,
44334+ if (!ops->map(iop, iova, iova + size, size,
44335 IOMMU_READ | IOMMU_NOEXEC))
44336- return __FAIL(ops, i);
44337+ return __FAIL(iop, i);
44338
44339- if (ops->iova_to_phys(ops, iova + 42) != (iova + 42))
44340- return __FAIL(ops, i);
44341+ if (ops->iova_to_phys(iop, iova + 42) != (iova + 42))
44342+ return __FAIL(iop, i);
44343
44344 iova += SZ_1G;
44345 j++;
44346@@ -908,15 +905,15 @@ static int __init arm_lpae_run_tests(struct io_pgtable_cfg *cfg)
44347
44348 /* Partial unmap */
44349 size = 1UL << __ffs(cfg->pgsize_bitmap);
44350- if (ops->unmap(ops, SZ_1G + size, size) != size)
44351- return __FAIL(ops, i);
44352+ if (ops->unmap(iop, SZ_1G + size, size) != size)
44353+ return __FAIL(iop, i);
44354
44355 /* Remap of partial unmap */
44356- if (ops->map(ops, SZ_1G + size, size, size, IOMMU_READ))
44357- return __FAIL(ops, i);
44358+ if (ops->map(iop, SZ_1G + size, size, size, IOMMU_READ))
44359+ return __FAIL(iop, i);
44360
44361- if (ops->iova_to_phys(ops, SZ_1G + size + 42) != (size + 42))
44362- return __FAIL(ops, i);
44363+ if (ops->iova_to_phys(iop, SZ_1G + size + 42) != (size + 42))
44364+ return __FAIL(iop, i);
44365
44366 /* Full unmap */
44367 iova = 0;
44368@@ -924,25 +921,25 @@ static int __init arm_lpae_run_tests(struct io_pgtable_cfg *cfg)
44369 while (j != BITS_PER_LONG) {
44370 size = 1UL << j;
44371
44372- if (ops->unmap(ops, iova, size) != size)
44373- return __FAIL(ops, i);
44374+ if (ops->unmap(iop, iova, size) != size)
44375+ return __FAIL(iop, i);
44376
44377- if (ops->iova_to_phys(ops, iova + 42))
44378- return __FAIL(ops, i);
44379+ if (ops->iova_to_phys(iop, iova + 42))
44380+ return __FAIL(iop, i);
44381
44382 /* Remap full block */
44383- if (ops->map(ops, iova, iova, size, IOMMU_WRITE))
44384- return __FAIL(ops, i);
44385+ if (ops->map(iop, iova, iova, size, IOMMU_WRITE))
44386+ return __FAIL(iop, i);
44387
44388- if (ops->iova_to_phys(ops, iova + 42) != (iova + 42))
44389- return __FAIL(ops, i);
44390+ if (ops->iova_to_phys(iop, iova + 42) != (iova + 42))
44391+ return __FAIL(iop, i);
44392
44393 iova += SZ_1G;
44394 j++;
44395 j = find_next_bit(&cfg->pgsize_bitmap, BITS_PER_LONG, j);
44396 }
44397
44398- free_io_pgtable_ops(ops);
44399+ free_io_pgtable(iop);
44400 }
44401
44402 selftest_running = false;
44403diff --git a/drivers/iommu/io-pgtable.c b/drivers/iommu/io-pgtable.c
44404index 6436fe2..088c965 100644
44405--- a/drivers/iommu/io-pgtable.c
44406+++ b/drivers/iommu/io-pgtable.c
44407@@ -40,7 +40,7 @@ io_pgtable_init_table[IO_PGTABLE_NUM_FMTS] =
44408 #endif
44409 };
44410
44411-struct io_pgtable_ops *alloc_io_pgtable_ops(enum io_pgtable_fmt fmt,
44412+struct io_pgtable *alloc_io_pgtable(enum io_pgtable_fmt fmt,
44413 struct io_pgtable_cfg *cfg,
44414 void *cookie)
44415 {
44416@@ -62,21 +62,18 @@ struct io_pgtable_ops *alloc_io_pgtable_ops(enum io_pgtable_fmt fmt,
44417 iop->cookie = cookie;
44418 iop->cfg = *cfg;
44419
44420- return &iop->ops;
44421+ return iop;
44422 }
44423
44424 /*
44425 * It is the IOMMU driver's responsibility to ensure that the page table
44426 * is no longer accessible to the walker by this point.
44427 */
44428-void free_io_pgtable_ops(struct io_pgtable_ops *ops)
44429+void free_io_pgtable(struct io_pgtable *iop)
44430 {
44431- struct io_pgtable *iop;
44432-
44433- if (!ops)
44434+ if (!iop)
44435 return;
44436
44437- iop = container_of(ops, struct io_pgtable, ops);
44438 iop->cfg.tlb->tlb_flush_all(iop->cookie);
44439 io_pgtable_init_table[iop->fmt]->free(iop);
44440 }
44441diff --git a/drivers/iommu/io-pgtable.h b/drivers/iommu/io-pgtable.h
44442index 10e32f6..0b276c8 100644
44443--- a/drivers/iommu/io-pgtable.h
44444+++ b/drivers/iommu/io-pgtable.h
44445@@ -75,17 +75,18 @@ struct io_pgtable_cfg {
44446 * These functions map directly onto the iommu_ops member functions with
44447 * the same names.
44448 */
44449+struct io_pgtable;
44450 struct io_pgtable_ops {
44451- int (*map)(struct io_pgtable_ops *ops, unsigned long iova,
44452+ int (*map)(struct io_pgtable *iop, unsigned long iova,
44453 phys_addr_t paddr, size_t size, int prot);
44454- int (*unmap)(struct io_pgtable_ops *ops, unsigned long iova,
44455+ int (*unmap)(struct io_pgtable *iop, unsigned long iova,
44456 size_t size);
44457- phys_addr_t (*iova_to_phys)(struct io_pgtable_ops *ops,
44458+ phys_addr_t (*iova_to_phys)(struct io_pgtable *iop,
44459 unsigned long iova);
44460 };
44461
44462 /**
44463- * alloc_io_pgtable_ops() - Allocate a page table allocator for use by an IOMMU.
44464+ * alloc_io_pgtable() - Allocate a page table allocator for use by an IOMMU.
44465 *
44466 * @fmt: The page table format.
44467 * @cfg: The page table configuration. This will be modified to represent
44468@@ -94,9 +95,9 @@ struct io_pgtable_ops {
44469 * @cookie: An opaque token provided by the IOMMU driver and passed back to
44470 * the callback routines in cfg->tlb.
44471 */
44472-struct io_pgtable_ops *alloc_io_pgtable_ops(enum io_pgtable_fmt fmt,
44473- struct io_pgtable_cfg *cfg,
44474- void *cookie);
44475+struct io_pgtable *alloc_io_pgtable(enum io_pgtable_fmt fmt,
44476+ struct io_pgtable_cfg *cfg,
44477+ void *cookie);
44478
44479 /**
44480 * free_io_pgtable_ops() - Free an io_pgtable_ops structure. The caller
44481@@ -105,7 +106,7 @@ struct io_pgtable_ops *alloc_io_pgtable_ops(enum io_pgtable_fmt fmt,
44482 *
44483 * @ops: The ops returned from alloc_io_pgtable_ops.
44484 */
44485-void free_io_pgtable_ops(struct io_pgtable_ops *ops);
44486+void free_io_pgtable(struct io_pgtable *iop);
44487
44488
44489 /*
44490@@ -125,7 +126,7 @@ struct io_pgtable {
44491 enum io_pgtable_fmt fmt;
44492 void *cookie;
44493 struct io_pgtable_cfg cfg;
44494- struct io_pgtable_ops ops;
44495+ const struct io_pgtable_ops *ops;
44496 };
44497
44498 /**
44499diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
44500index 72e683d..c9db262 100644
44501--- a/drivers/iommu/iommu.c
44502+++ b/drivers/iommu/iommu.c
44503@@ -802,7 +802,7 @@ static int iommu_bus_notifier(struct notifier_block *nb,
44504 static int iommu_bus_init(struct bus_type *bus, const struct iommu_ops *ops)
44505 {
44506 int err;
44507- struct notifier_block *nb;
44508+ notifier_block_no_const *nb;
44509 struct iommu_callback_data cb = {
44510 .ops = ops,
44511 };
44512diff --git a/drivers/iommu/ipmmu-vmsa.c b/drivers/iommu/ipmmu-vmsa.c
44513index bc39bdf..e2de272 100644
44514--- a/drivers/iommu/ipmmu-vmsa.c
44515+++ b/drivers/iommu/ipmmu-vmsa.c
44516@@ -41,7 +41,7 @@ struct ipmmu_vmsa_domain {
44517 struct iommu_domain *io_domain;
44518
44519 struct io_pgtable_cfg cfg;
44520- struct io_pgtable_ops *iop;
44521+ struct io_pgtable *iop;
44522
44523 unsigned int context_id;
44524 spinlock_t lock; /* Protects mappings */
44525@@ -323,8 +323,7 @@ static int ipmmu_domain_init_context(struct ipmmu_vmsa_domain *domain)
44526 domain->cfg.oas = 40;
44527 domain->cfg.tlb = &ipmmu_gather_ops;
44528
44529- domain->iop = alloc_io_pgtable_ops(ARM_32_LPAE_S1, &domain->cfg,
44530- domain);
44531+ domain->iop = alloc_io_pgtable(ARM_32_LPAE_S1, &domain->cfg, domain);
44532 if (!domain->iop)
44533 return -EINVAL;
44534
44535@@ -482,7 +481,7 @@ static void ipmmu_domain_destroy(struct iommu_domain *io_domain)
44536 * been detached.
44537 */
44538 ipmmu_domain_destroy_context(domain);
44539- free_io_pgtable_ops(domain->iop);
44540+ free_io_pgtable(domain->iop);
44541 kfree(domain);
44542 }
44543
44544@@ -551,7 +550,7 @@ static int ipmmu_map(struct iommu_domain *io_domain, unsigned long iova,
44545 if (!domain)
44546 return -ENODEV;
44547
44548- return domain->iop->map(domain->iop, iova, paddr, size, prot);
44549+ return domain->iop->ops->map(domain->iop, iova, paddr, size, prot);
44550 }
44551
44552 static size_t ipmmu_unmap(struct iommu_domain *io_domain, unsigned long iova,
44553@@ -559,7 +558,7 @@ static size_t ipmmu_unmap(struct iommu_domain *io_domain, unsigned long iova,
44554 {
44555 struct ipmmu_vmsa_domain *domain = io_domain->priv;
44556
44557- return domain->iop->unmap(domain->iop, iova, size);
44558+ return domain->iop->ops->unmap(domain->iop, iova, size);
44559 }
44560
44561 static phys_addr_t ipmmu_iova_to_phys(struct iommu_domain *io_domain,
44562@@ -569,7 +568,7 @@ static phys_addr_t ipmmu_iova_to_phys(struct iommu_domain *io_domain,
44563
44564 /* TODO: Is locking needed ? */
44565
44566- return domain->iop->iova_to_phys(domain->iop, iova);
44567+ return domain->iop->ops->iova_to_phys(domain->iop, iova);
44568 }
44569
44570 static int ipmmu_find_utlbs(struct ipmmu_vmsa_device *mmu, struct device *dev,
44571diff --git a/drivers/iommu/irq_remapping.c b/drivers/iommu/irq_remapping.c
44572index 390079e..1da9d6c 100644
44573--- a/drivers/iommu/irq_remapping.c
44574+++ b/drivers/iommu/irq_remapping.c
44575@@ -329,7 +329,7 @@ int setup_hpet_msi_remapped(unsigned int irq, unsigned int id)
44576 void panic_if_irq_remap(const char *msg)
44577 {
44578 if (irq_remapping_enabled)
44579- panic(msg);
44580+ panic("%s", msg);
44581 }
44582
44583 static void ir_ack_apic_edge(struct irq_data *data)
44584@@ -350,10 +350,12 @@ static void ir_print_prefix(struct irq_data *data, struct seq_file *p)
44585
44586 void irq_remap_modify_chip_defaults(struct irq_chip *chip)
44587 {
44588- chip->irq_print_chip = ir_print_prefix;
44589- chip->irq_ack = ir_ack_apic_edge;
44590- chip->irq_eoi = ir_ack_apic_level;
44591- chip->irq_set_affinity = x86_io_apic_ops.set_affinity;
44592+ pax_open_kernel();
44593+ *(void **)&chip->irq_print_chip = ir_print_prefix;
44594+ *(void **)&chip->irq_ack = ir_ack_apic_edge;
44595+ *(void **)&chip->irq_eoi = ir_ack_apic_level;
44596+ *(void **)&chip->irq_set_affinity = x86_io_apic_ops.set_affinity;
44597+ pax_close_kernel();
44598 }
44599
44600 bool setup_remapped_irq(int irq, struct irq_cfg *cfg, struct irq_chip *chip)
44601diff --git a/drivers/irqchip/irq-gic.c b/drivers/irqchip/irq-gic.c
44602index 471e1cd..b53b870 100644
44603--- a/drivers/irqchip/irq-gic.c
44604+++ b/drivers/irqchip/irq-gic.c
44605@@ -84,7 +84,7 @@ static u8 gic_cpu_map[NR_GIC_CPU_IF] __read_mostly;
44606 * Supported arch specific GIC irq extension.
44607 * Default make them NULL.
44608 */
44609-struct irq_chip gic_arch_extn = {
44610+irq_chip_no_const gic_arch_extn = {
44611 .irq_eoi = NULL,
44612 .irq_mask = NULL,
44613 .irq_unmask = NULL,
44614@@ -318,7 +318,7 @@ static void gic_handle_cascade_irq(unsigned int irq, struct irq_desc *desc)
44615 chained_irq_exit(chip, desc);
44616 }
44617
44618-static struct irq_chip gic_chip = {
44619+static irq_chip_no_const gic_chip __read_only = {
44620 .name = "GIC",
44621 .irq_mask = gic_mask_irq,
44622 .irq_unmask = gic_unmask_irq,
44623diff --git a/drivers/irqchip/irq-renesas-intc-irqpin.c b/drivers/irqchip/irq-renesas-intc-irqpin.c
44624index 9a0767b..5e5f86f 100644
44625--- a/drivers/irqchip/irq-renesas-intc-irqpin.c
44626+++ b/drivers/irqchip/irq-renesas-intc-irqpin.c
44627@@ -373,7 +373,7 @@ static int intc_irqpin_probe(struct platform_device *pdev)
44628 struct intc_irqpin_iomem *i;
44629 struct resource *io[INTC_IRQPIN_REG_NR];
44630 struct resource *irq;
44631- struct irq_chip *irq_chip;
44632+ irq_chip_no_const *irq_chip;
44633 void (*enable_fn)(struct irq_data *d);
44634 void (*disable_fn)(struct irq_data *d);
44635 const char *name = dev_name(dev);
44636diff --git a/drivers/irqchip/irq-renesas-irqc.c b/drivers/irqchip/irq-renesas-irqc.c
44637index 384e6ed..7a771b2 100644
44638--- a/drivers/irqchip/irq-renesas-irqc.c
44639+++ b/drivers/irqchip/irq-renesas-irqc.c
44640@@ -151,7 +151,7 @@ static int irqc_probe(struct platform_device *pdev)
44641 struct irqc_priv *p;
44642 struct resource *io;
44643 struct resource *irq;
44644- struct irq_chip *irq_chip;
44645+ irq_chip_no_const *irq_chip;
44646 const char *name = dev_name(&pdev->dev);
44647 int ret;
44648 int k;
44649diff --git a/drivers/isdn/capi/capi.c b/drivers/isdn/capi/capi.c
44650index 6a2df32..dc962f1 100644
44651--- a/drivers/isdn/capi/capi.c
44652+++ b/drivers/isdn/capi/capi.c
44653@@ -81,8 +81,8 @@ struct capiminor {
44654
44655 struct capi20_appl *ap;
44656 u32 ncci;
44657- atomic_t datahandle;
44658- atomic_t msgid;
44659+ atomic_unchecked_t datahandle;
44660+ atomic_unchecked_t msgid;
44661
44662 struct tty_port port;
44663 int ttyinstop;
44664@@ -391,7 +391,7 @@ gen_data_b3_resp_for(struct capiminor *mp, struct sk_buff *skb)
44665 capimsg_setu16(s, 2, mp->ap->applid);
44666 capimsg_setu8 (s, 4, CAPI_DATA_B3);
44667 capimsg_setu8 (s, 5, CAPI_RESP);
44668- capimsg_setu16(s, 6, atomic_inc_return(&mp->msgid));
44669+ capimsg_setu16(s, 6, atomic_inc_return_unchecked(&mp->msgid));
44670 capimsg_setu32(s, 8, mp->ncci);
44671 capimsg_setu16(s, 12, datahandle);
44672 }
44673@@ -512,14 +512,14 @@ static void handle_minor_send(struct capiminor *mp)
44674 mp->outbytes -= len;
44675 spin_unlock_bh(&mp->outlock);
44676
44677- datahandle = atomic_inc_return(&mp->datahandle);
44678+ datahandle = atomic_inc_return_unchecked(&mp->datahandle);
44679 skb_push(skb, CAPI_DATA_B3_REQ_LEN);
44680 memset(skb->data, 0, CAPI_DATA_B3_REQ_LEN);
44681 capimsg_setu16(skb->data, 0, CAPI_DATA_B3_REQ_LEN);
44682 capimsg_setu16(skb->data, 2, mp->ap->applid);
44683 capimsg_setu8 (skb->data, 4, CAPI_DATA_B3);
44684 capimsg_setu8 (skb->data, 5, CAPI_REQ);
44685- capimsg_setu16(skb->data, 6, atomic_inc_return(&mp->msgid));
44686+ capimsg_setu16(skb->data, 6, atomic_inc_return_unchecked(&mp->msgid));
44687 capimsg_setu32(skb->data, 8, mp->ncci); /* NCCI */
44688 capimsg_setu32(skb->data, 12, (u32)(long)skb->data);/* Data32 */
44689 capimsg_setu16(skb->data, 16, len); /* Data length */
44690diff --git a/drivers/isdn/gigaset/bas-gigaset.c b/drivers/isdn/gigaset/bas-gigaset.c
44691index aecec6d..11e13c5 100644
44692--- a/drivers/isdn/gigaset/bas-gigaset.c
44693+++ b/drivers/isdn/gigaset/bas-gigaset.c
44694@@ -2565,22 +2565,22 @@ static int gigaset_post_reset(struct usb_interface *intf)
44695
44696
44697 static const struct gigaset_ops gigops = {
44698- gigaset_write_cmd,
44699- gigaset_write_room,
44700- gigaset_chars_in_buffer,
44701- gigaset_brkchars,
44702- gigaset_init_bchannel,
44703- gigaset_close_bchannel,
44704- gigaset_initbcshw,
44705- gigaset_freebcshw,
44706- gigaset_reinitbcshw,
44707- gigaset_initcshw,
44708- gigaset_freecshw,
44709- gigaset_set_modem_ctrl,
44710- gigaset_baud_rate,
44711- gigaset_set_line_ctrl,
44712- gigaset_isoc_send_skb,
44713- gigaset_isoc_input,
44714+ .write_cmd = gigaset_write_cmd,
44715+ .write_room = gigaset_write_room,
44716+ .chars_in_buffer = gigaset_chars_in_buffer,
44717+ .brkchars = gigaset_brkchars,
44718+ .init_bchannel = gigaset_init_bchannel,
44719+ .close_bchannel = gigaset_close_bchannel,
44720+ .initbcshw = gigaset_initbcshw,
44721+ .freebcshw = gigaset_freebcshw,
44722+ .reinitbcshw = gigaset_reinitbcshw,
44723+ .initcshw = gigaset_initcshw,
44724+ .freecshw = gigaset_freecshw,
44725+ .set_modem_ctrl = gigaset_set_modem_ctrl,
44726+ .baud_rate = gigaset_baud_rate,
44727+ .set_line_ctrl = gigaset_set_line_ctrl,
44728+ .send_skb = gigaset_isoc_send_skb,
44729+ .handle_input = gigaset_isoc_input,
44730 };
44731
44732 /* bas_gigaset_init
44733diff --git a/drivers/isdn/gigaset/interface.c b/drivers/isdn/gigaset/interface.c
44734index 600c79b..3752bab 100644
44735--- a/drivers/isdn/gigaset/interface.c
44736+++ b/drivers/isdn/gigaset/interface.c
44737@@ -130,9 +130,9 @@ static int if_open(struct tty_struct *tty, struct file *filp)
44738 }
44739 tty->driver_data = cs;
44740
44741- ++cs->port.count;
44742+ atomic_inc(&cs->port.count);
44743
44744- if (cs->port.count == 1) {
44745+ if (atomic_read(&cs->port.count) == 1) {
44746 tty_port_tty_set(&cs->port, tty);
44747 cs->port.low_latency = 1;
44748 }
44749@@ -156,9 +156,9 @@ static void if_close(struct tty_struct *tty, struct file *filp)
44750
44751 if (!cs->connected)
44752 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
44753- else if (!cs->port.count)
44754+ else if (!atomic_read(&cs->port.count))
44755 dev_warn(cs->dev, "%s: device not opened\n", __func__);
44756- else if (!--cs->port.count)
44757+ else if (!atomic_dec_return(&cs->port.count))
44758 tty_port_tty_set(&cs->port, NULL);
44759
44760 mutex_unlock(&cs->mutex);
44761diff --git a/drivers/isdn/gigaset/ser-gigaset.c b/drivers/isdn/gigaset/ser-gigaset.c
44762index 8c91fd5..14f13ce 100644
44763--- a/drivers/isdn/gigaset/ser-gigaset.c
44764+++ b/drivers/isdn/gigaset/ser-gigaset.c
44765@@ -453,22 +453,22 @@ static int gigaset_set_line_ctrl(struct cardstate *cs, unsigned cflag)
44766 }
44767
44768 static const struct gigaset_ops ops = {
44769- gigaset_write_cmd,
44770- gigaset_write_room,
44771- gigaset_chars_in_buffer,
44772- gigaset_brkchars,
44773- gigaset_init_bchannel,
44774- gigaset_close_bchannel,
44775- gigaset_initbcshw,
44776- gigaset_freebcshw,
44777- gigaset_reinitbcshw,
44778- gigaset_initcshw,
44779- gigaset_freecshw,
44780- gigaset_set_modem_ctrl,
44781- gigaset_baud_rate,
44782- gigaset_set_line_ctrl,
44783- gigaset_m10x_send_skb, /* asyncdata.c */
44784- gigaset_m10x_input, /* asyncdata.c */
44785+ .write_cmd = gigaset_write_cmd,
44786+ .write_room = gigaset_write_room,
44787+ .chars_in_buffer = gigaset_chars_in_buffer,
44788+ .brkchars = gigaset_brkchars,
44789+ .init_bchannel = gigaset_init_bchannel,
44790+ .close_bchannel = gigaset_close_bchannel,
44791+ .initbcshw = gigaset_initbcshw,
44792+ .freebcshw = gigaset_freebcshw,
44793+ .reinitbcshw = gigaset_reinitbcshw,
44794+ .initcshw = gigaset_initcshw,
44795+ .freecshw = gigaset_freecshw,
44796+ .set_modem_ctrl = gigaset_set_modem_ctrl,
44797+ .baud_rate = gigaset_baud_rate,
44798+ .set_line_ctrl = gigaset_set_line_ctrl,
44799+ .send_skb = gigaset_m10x_send_skb, /* asyncdata.c */
44800+ .handle_input = gigaset_m10x_input, /* asyncdata.c */
44801 };
44802
44803
44804diff --git a/drivers/isdn/gigaset/usb-gigaset.c b/drivers/isdn/gigaset/usb-gigaset.c
44805index 5f306e2..5342f88 100644
44806--- a/drivers/isdn/gigaset/usb-gigaset.c
44807+++ b/drivers/isdn/gigaset/usb-gigaset.c
44808@@ -543,7 +543,7 @@ static int gigaset_brkchars(struct cardstate *cs, const unsigned char buf[6])
44809 gigaset_dbg_buffer(DEBUG_USBREQ, "brkchars", 6, buf);
44810 memcpy(cs->hw.usb->bchars, buf, 6);
44811 return usb_control_msg(udev, usb_sndctrlpipe(udev, 0), 0x19, 0x41,
44812- 0, 0, &buf, 6, 2000);
44813+ 0, 0, buf, 6, 2000);
44814 }
44815
44816 static void gigaset_freebcshw(struct bc_state *bcs)
44817@@ -862,22 +862,22 @@ static int gigaset_pre_reset(struct usb_interface *intf)
44818 }
44819
44820 static const struct gigaset_ops ops = {
44821- gigaset_write_cmd,
44822- gigaset_write_room,
44823- gigaset_chars_in_buffer,
44824- gigaset_brkchars,
44825- gigaset_init_bchannel,
44826- gigaset_close_bchannel,
44827- gigaset_initbcshw,
44828- gigaset_freebcshw,
44829- gigaset_reinitbcshw,
44830- gigaset_initcshw,
44831- gigaset_freecshw,
44832- gigaset_set_modem_ctrl,
44833- gigaset_baud_rate,
44834- gigaset_set_line_ctrl,
44835- gigaset_m10x_send_skb,
44836- gigaset_m10x_input,
44837+ .write_cmd = gigaset_write_cmd,
44838+ .write_room = gigaset_write_room,
44839+ .chars_in_buffer = gigaset_chars_in_buffer,
44840+ .brkchars = gigaset_brkchars,
44841+ .init_bchannel = gigaset_init_bchannel,
44842+ .close_bchannel = gigaset_close_bchannel,
44843+ .initbcshw = gigaset_initbcshw,
44844+ .freebcshw = gigaset_freebcshw,
44845+ .reinitbcshw = gigaset_reinitbcshw,
44846+ .initcshw = gigaset_initcshw,
44847+ .freecshw = gigaset_freecshw,
44848+ .set_modem_ctrl = gigaset_set_modem_ctrl,
44849+ .baud_rate = gigaset_baud_rate,
44850+ .set_line_ctrl = gigaset_set_line_ctrl,
44851+ .send_skb = gigaset_m10x_send_skb,
44852+ .handle_input = gigaset_m10x_input,
44853 };
44854
44855 /*
44856diff --git a/drivers/isdn/hardware/avm/b1.c b/drivers/isdn/hardware/avm/b1.c
44857index 4d9b195..455075c 100644
44858--- a/drivers/isdn/hardware/avm/b1.c
44859+++ b/drivers/isdn/hardware/avm/b1.c
44860@@ -176,7 +176,7 @@ int b1_load_t4file(avmcard *card, capiloaddatapart *t4file)
44861 }
44862 if (left) {
44863 if (t4file->user) {
44864- if (copy_from_user(buf, dp, left))
44865+ if (left > sizeof buf || copy_from_user(buf, dp, left))
44866 return -EFAULT;
44867 } else {
44868 memcpy(buf, dp, left);
44869@@ -224,7 +224,7 @@ int b1_load_config(avmcard *card, capiloaddatapart *config)
44870 }
44871 if (left) {
44872 if (config->user) {
44873- if (copy_from_user(buf, dp, left))
44874+ if (left > sizeof buf || copy_from_user(buf, dp, left))
44875 return -EFAULT;
44876 } else {
44877 memcpy(buf, dp, left);
44878diff --git a/drivers/isdn/i4l/isdn_common.c b/drivers/isdn/i4l/isdn_common.c
44879index 9b856e1..fa03c92 100644
44880--- a/drivers/isdn/i4l/isdn_common.c
44881+++ b/drivers/isdn/i4l/isdn_common.c
44882@@ -1654,6 +1654,8 @@ isdn_ioctl(struct file *file, uint cmd, ulong arg)
44883 } else
44884 return -EINVAL;
44885 case IIOCDBGVAR:
44886+ if (!capable(CAP_SYS_RAWIO))
44887+ return -EPERM;
44888 if (arg) {
44889 if (copy_to_user(argp, &dev, sizeof(ulong)))
44890 return -EFAULT;
44891diff --git a/drivers/isdn/i4l/isdn_concap.c b/drivers/isdn/i4l/isdn_concap.c
44892index 91d5730..336523e 100644
44893--- a/drivers/isdn/i4l/isdn_concap.c
44894+++ b/drivers/isdn/i4l/isdn_concap.c
44895@@ -80,9 +80,9 @@ static int isdn_concap_dl_disconn_req(struct concap_proto *concap)
44896 }
44897
44898 struct concap_device_ops isdn_concap_reliable_dl_dops = {
44899- &isdn_concap_dl_data_req,
44900- &isdn_concap_dl_connect_req,
44901- &isdn_concap_dl_disconn_req
44902+ .data_req = &isdn_concap_dl_data_req,
44903+ .connect_req = &isdn_concap_dl_connect_req,
44904+ .disconn_req = &isdn_concap_dl_disconn_req
44905 };
44906
44907 /* The following should better go into a dedicated source file such that
44908diff --git a/drivers/isdn/i4l/isdn_tty.c b/drivers/isdn/i4l/isdn_tty.c
44909index bc91261..2ef7e36 100644
44910--- a/drivers/isdn/i4l/isdn_tty.c
44911+++ b/drivers/isdn/i4l/isdn_tty.c
44912@@ -1503,9 +1503,9 @@ isdn_tty_open(struct tty_struct *tty, struct file *filp)
44913
44914 #ifdef ISDN_DEBUG_MODEM_OPEN
44915 printk(KERN_DEBUG "isdn_tty_open %s, count = %d\n", tty->name,
44916- port->count);
44917+ atomic_read(&port->count));
44918 #endif
44919- port->count++;
44920+ atomic_inc(&port->count);
44921 port->tty = tty;
44922 /*
44923 * Start up serial port
44924@@ -1549,7 +1549,7 @@ isdn_tty_close(struct tty_struct *tty, struct file *filp)
44925 #endif
44926 return;
44927 }
44928- if ((tty->count == 1) && (port->count != 1)) {
44929+ if ((tty->count == 1) && (atomic_read(&port->count) != 1)) {
44930 /*
44931 * Uh, oh. tty->count is 1, which means that the tty
44932 * structure will be freed. Info->count should always
44933@@ -1558,15 +1558,15 @@ isdn_tty_close(struct tty_struct *tty, struct file *filp)
44934 * serial port won't be shutdown.
44935 */
44936 printk(KERN_ERR "isdn_tty_close: bad port count; tty->count is 1, "
44937- "info->count is %d\n", port->count);
44938- port->count = 1;
44939+ "info->count is %d\n", atomic_read(&port->count));
44940+ atomic_set(&port->count, 1);
44941 }
44942- if (--port->count < 0) {
44943+ if (atomic_dec_return(&port->count) < 0) {
44944 printk(KERN_ERR "isdn_tty_close: bad port count for ttyi%d: %d\n",
44945- info->line, port->count);
44946- port->count = 0;
44947+ info->line, atomic_read(&port->count));
44948+ atomic_set(&port->count, 0);
44949 }
44950- if (port->count) {
44951+ if (atomic_read(&port->count)) {
44952 #ifdef ISDN_DEBUG_MODEM_OPEN
44953 printk(KERN_DEBUG "isdn_tty_close after info->count != 0\n");
44954 #endif
44955@@ -1620,7 +1620,7 @@ isdn_tty_hangup(struct tty_struct *tty)
44956 if (isdn_tty_paranoia_check(info, tty->name, "isdn_tty_hangup"))
44957 return;
44958 isdn_tty_shutdown(info);
44959- port->count = 0;
44960+ atomic_set(&port->count, 0);
44961 port->flags &= ~ASYNC_NORMAL_ACTIVE;
44962 port->tty = NULL;
44963 wake_up_interruptible(&port->open_wait);
44964@@ -1965,7 +1965,7 @@ isdn_tty_find_icall(int di, int ch, setup_parm *setup)
44965 for (i = 0; i < ISDN_MAX_CHANNELS; i++) {
44966 modem_info *info = &dev->mdm.info[i];
44967
44968- if (info->port.count == 0)
44969+ if (atomic_read(&info->port.count) == 0)
44970 continue;
44971 if ((info->emu.mdmreg[REG_SI1] & si2bit[si1]) && /* SI1 is matching */
44972 (info->emu.mdmreg[REG_SI2] == si2)) { /* SI2 is matching */
44973diff --git a/drivers/isdn/i4l/isdn_x25iface.c b/drivers/isdn/i4l/isdn_x25iface.c
44974index e2d4e58..40cd045 100644
44975--- a/drivers/isdn/i4l/isdn_x25iface.c
44976+++ b/drivers/isdn/i4l/isdn_x25iface.c
44977@@ -53,14 +53,14 @@ static int isdn_x25iface_disconn_ind(struct concap_proto *);
44978
44979
44980 static struct concap_proto_ops ix25_pops = {
44981- &isdn_x25iface_proto_new,
44982- &isdn_x25iface_proto_del,
44983- &isdn_x25iface_proto_restart,
44984- &isdn_x25iface_proto_close,
44985- &isdn_x25iface_xmit,
44986- &isdn_x25iface_receive,
44987- &isdn_x25iface_connect_ind,
44988- &isdn_x25iface_disconn_ind
44989+ .proto_new = &isdn_x25iface_proto_new,
44990+ .proto_del = &isdn_x25iface_proto_del,
44991+ .restart = &isdn_x25iface_proto_restart,
44992+ .close = &isdn_x25iface_proto_close,
44993+ .encap_and_xmit = &isdn_x25iface_xmit,
44994+ .data_ind = &isdn_x25iface_receive,
44995+ .connect_ind = &isdn_x25iface_connect_ind,
44996+ .disconn_ind = &isdn_x25iface_disconn_ind
44997 };
44998
44999 /* error message helper function */
45000diff --git a/drivers/isdn/icn/icn.c b/drivers/isdn/icn/icn.c
45001index 358a574..b4987ea 100644
45002--- a/drivers/isdn/icn/icn.c
45003+++ b/drivers/isdn/icn/icn.c
45004@@ -1045,7 +1045,7 @@ icn_writecmd(const u_char *buf, int len, int user, icn_card *card)
45005 if (count > len)
45006 count = len;
45007 if (user) {
45008- if (copy_from_user(msg, buf, count))
45009+ if (count > sizeof msg || copy_from_user(msg, buf, count))
45010 return -EFAULT;
45011 } else
45012 memcpy(msg, buf, count);
45013diff --git a/drivers/isdn/mISDN/dsp_cmx.c b/drivers/isdn/mISDN/dsp_cmx.c
45014index 87f7dff..7300125 100644
45015--- a/drivers/isdn/mISDN/dsp_cmx.c
45016+++ b/drivers/isdn/mISDN/dsp_cmx.c
45017@@ -1625,7 +1625,7 @@ unsigned long dsp_spl_jiffies; /* calculate the next time to fire */
45018 static u16 dsp_count; /* last sample count */
45019 static int dsp_count_valid; /* if we have last sample count */
45020
45021-void
45022+void __intentional_overflow(-1)
45023 dsp_cmx_send(void *arg)
45024 {
45025 struct dsp_conf *conf;
45026diff --git a/drivers/leds/leds-clevo-mail.c b/drivers/leds/leds-clevo-mail.c
45027index 0f9ed1e..2715d6f 100644
45028--- a/drivers/leds/leds-clevo-mail.c
45029+++ b/drivers/leds/leds-clevo-mail.c
45030@@ -40,7 +40,7 @@ static int __init clevo_mail_led_dmi_callback(const struct dmi_system_id *id)
45031 * detected as working, but in reality it is not) as low as
45032 * possible.
45033 */
45034-static struct dmi_system_id clevo_mail_led_dmi_table[] __initdata = {
45035+static struct dmi_system_id clevo_mail_led_dmi_table[] __initconst = {
45036 {
45037 .callback = clevo_mail_led_dmi_callback,
45038 .ident = "Clevo D410J",
45039diff --git a/drivers/leds/leds-ss4200.c b/drivers/leds/leds-ss4200.c
45040index 046cb70..6b20d39 100644
45041--- a/drivers/leds/leds-ss4200.c
45042+++ b/drivers/leds/leds-ss4200.c
45043@@ -91,7 +91,7 @@ MODULE_PARM_DESC(nodetect, "Skip DMI-based hardware detection");
45044 * detected as working, but in reality it is not) as low as
45045 * possible.
45046 */
45047-static struct dmi_system_id nas_led_whitelist[] __initdata = {
45048+static struct dmi_system_id nas_led_whitelist[] __initconst = {
45049 {
45050 .callback = ss4200_led_dmi_callback,
45051 .ident = "Intel SS4200-E",
45052diff --git a/drivers/lguest/core.c b/drivers/lguest/core.c
45053index 7dc93aa..8272379 100644
45054--- a/drivers/lguest/core.c
45055+++ b/drivers/lguest/core.c
45056@@ -96,9 +96,17 @@ static __init int map_switcher(void)
45057 * The end address needs +1 because __get_vm_area allocates an
45058 * extra guard page, so we need space for that.
45059 */
45060+
45061+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
45062+ switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
45063+ VM_ALLOC | VM_KERNEXEC, switcher_addr, switcher_addr
45064+ + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
45065+#else
45066 switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
45067 VM_ALLOC, switcher_addr, switcher_addr
45068 + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
45069+#endif
45070+
45071 if (!switcher_vma) {
45072 err = -ENOMEM;
45073 printk("lguest: could not map switcher pages high\n");
45074@@ -121,7 +129,7 @@ static __init int map_switcher(void)
45075 * Now the Switcher is mapped at the right address, we can't fail!
45076 * Copy in the compiled-in Switcher code (from x86/switcher_32.S).
45077 */
45078- memcpy(switcher_vma->addr, start_switcher_text,
45079+ memcpy(switcher_vma->addr, ktla_ktva(start_switcher_text),
45080 end_switcher_text - start_switcher_text);
45081
45082 printk(KERN_INFO "lguest: mapped switcher at %p\n",
45083diff --git a/drivers/lguest/page_tables.c b/drivers/lguest/page_tables.c
45084index e3abebc9..6a35328 100644
45085--- a/drivers/lguest/page_tables.c
45086+++ b/drivers/lguest/page_tables.c
45087@@ -585,7 +585,7 @@ void pin_page(struct lg_cpu *cpu, unsigned long vaddr)
45088 /*:*/
45089
45090 #ifdef CONFIG_X86_PAE
45091-static void release_pmd(pmd_t *spmd)
45092+static void __intentional_overflow(-1) release_pmd(pmd_t *spmd)
45093 {
45094 /* If the entry's not present, there's nothing to release. */
45095 if (pmd_flags(*spmd) & _PAGE_PRESENT) {
45096diff --git a/drivers/lguest/x86/core.c b/drivers/lguest/x86/core.c
45097index 30f2aef..391c748 100644
45098--- a/drivers/lguest/x86/core.c
45099+++ b/drivers/lguest/x86/core.c
45100@@ -60,7 +60,7 @@ static struct {
45101 /* Offset from where switcher.S was compiled to where we've copied it */
45102 static unsigned long switcher_offset(void)
45103 {
45104- return switcher_addr - (unsigned long)start_switcher_text;
45105+ return switcher_addr - (unsigned long)ktla_ktva(start_switcher_text);
45106 }
45107
45108 /* This cpu's struct lguest_pages (after the Switcher text page) */
45109@@ -100,7 +100,13 @@ static void copy_in_guest_info(struct lg_cpu *cpu, struct lguest_pages *pages)
45110 * These copies are pretty cheap, so we do them unconditionally: */
45111 /* Save the current Host top-level page directory.
45112 */
45113+
45114+#ifdef CONFIG_PAX_PER_CPU_PGD
45115+ pages->state.host_cr3 = read_cr3();
45116+#else
45117 pages->state.host_cr3 = __pa(current->mm->pgd);
45118+#endif
45119+
45120 /*
45121 * Set up the Guest's page tables to see this CPU's pages (and no
45122 * other CPU's pages).
45123@@ -494,7 +500,7 @@ void __init lguest_arch_host_init(void)
45124 * compiled-in switcher code and the high-mapped copy we just made.
45125 */
45126 for (i = 0; i < IDT_ENTRIES; i++)
45127- default_idt_entries[i] += switcher_offset();
45128+ default_idt_entries[i] = ktla_ktva(default_idt_entries[i]) + switcher_offset();
45129
45130 /*
45131 * Set up the Switcher's per-cpu areas.
45132@@ -577,7 +583,7 @@ void __init lguest_arch_host_init(void)
45133 * it will be undisturbed when we switch. To change %cs and jump we
45134 * need this structure to feed to Intel's "lcall" instruction.
45135 */
45136- lguest_entry.offset = (long)switch_to_guest + switcher_offset();
45137+ lguest_entry.offset = (long)ktla_ktva(switch_to_guest) + switcher_offset();
45138 lguest_entry.segment = LGUEST_CS;
45139
45140 /*
45141diff --git a/drivers/lguest/x86/switcher_32.S b/drivers/lguest/x86/switcher_32.S
45142index 40634b0..4f5855e 100644
45143--- a/drivers/lguest/x86/switcher_32.S
45144+++ b/drivers/lguest/x86/switcher_32.S
45145@@ -87,6 +87,7 @@
45146 #include <asm/page.h>
45147 #include <asm/segment.h>
45148 #include <asm/lguest.h>
45149+#include <asm/processor-flags.h>
45150
45151 // We mark the start of the code to copy
45152 // It's placed in .text tho it's never run here
45153@@ -149,6 +150,13 @@ ENTRY(switch_to_guest)
45154 // Changes type when we load it: damn Intel!
45155 // For after we switch over our page tables
45156 // That entry will be read-only: we'd crash.
45157+
45158+#ifdef CONFIG_PAX_KERNEXEC
45159+ mov %cr0, %edx
45160+ xor $X86_CR0_WP, %edx
45161+ mov %edx, %cr0
45162+#endif
45163+
45164 movl $(GDT_ENTRY_TSS*8), %edx
45165 ltr %dx
45166
45167@@ -157,9 +165,15 @@ ENTRY(switch_to_guest)
45168 // Let's clear it again for our return.
45169 // The GDT descriptor of the Host
45170 // Points to the table after two "size" bytes
45171- movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %edx
45172+ movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %eax
45173 // Clear "used" from type field (byte 5, bit 2)
45174- andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%edx)
45175+ andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%eax)
45176+
45177+#ifdef CONFIG_PAX_KERNEXEC
45178+ mov %cr0, %eax
45179+ xor $X86_CR0_WP, %eax
45180+ mov %eax, %cr0
45181+#endif
45182
45183 // Once our page table's switched, the Guest is live!
45184 // The Host fades as we run this final step.
45185@@ -295,13 +309,12 @@ deliver_to_host:
45186 // I consulted gcc, and it gave
45187 // These instructions, which I gladly credit:
45188 leal (%edx,%ebx,8), %eax
45189- movzwl (%eax),%edx
45190- movl 4(%eax), %eax
45191- xorw %ax, %ax
45192- orl %eax, %edx
45193+ movl 4(%eax), %edx
45194+ movw (%eax), %dx
45195 // Now the address of the handler's in %edx
45196 // We call it now: its "iret" drops us home.
45197- jmp *%edx
45198+ ljmp $__KERNEL_CS, $1f
45199+1: jmp *%edx
45200
45201 // Every interrupt can come to us here
45202 // But we must truly tell each apart.
45203diff --git a/drivers/md/bcache/closure.h b/drivers/md/bcache/closure.h
45204index a08e3ee..df8ade2 100644
45205--- a/drivers/md/bcache/closure.h
45206+++ b/drivers/md/bcache/closure.h
45207@@ -238,7 +238,7 @@ static inline void closure_set_stopped(struct closure *cl)
45208 static inline void set_closure_fn(struct closure *cl, closure_fn *fn,
45209 struct workqueue_struct *wq)
45210 {
45211- BUG_ON(object_is_on_stack(cl));
45212+ BUG_ON(object_starts_on_stack(cl));
45213 closure_set_ip(cl);
45214 cl->fn = fn;
45215 cl->wq = wq;
45216diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c
45217index 3a57679..c58cdaf 100644
45218--- a/drivers/md/bitmap.c
45219+++ b/drivers/md/bitmap.c
45220@@ -1786,7 +1786,7 @@ void bitmap_status(struct seq_file *seq, struct bitmap *bitmap)
45221 chunk_kb ? "KB" : "B");
45222 if (bitmap->storage.file) {
45223 seq_printf(seq, ", file: ");
45224- seq_path(seq, &bitmap->storage.file->f_path, " \t\n");
45225+ seq_path(seq, &bitmap->storage.file->f_path, " \t\n\\");
45226 }
45227
45228 seq_printf(seq, "\n");
45229diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
45230index c8a18e4..0ab43e5 100644
45231--- a/drivers/md/dm-ioctl.c
45232+++ b/drivers/md/dm-ioctl.c
45233@@ -1772,7 +1772,7 @@ static int validate_params(uint cmd, struct dm_ioctl *param)
45234 cmd == DM_LIST_VERSIONS_CMD)
45235 return 0;
45236
45237- if ((cmd == DM_DEV_CREATE_CMD)) {
45238+ if (cmd == DM_DEV_CREATE_CMD) {
45239 if (!*param->name) {
45240 DMWARN("name not supplied when creating device");
45241 return -EINVAL;
45242diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
45243index 089d627..ef7352e 100644
45244--- a/drivers/md/dm-raid1.c
45245+++ b/drivers/md/dm-raid1.c
45246@@ -40,7 +40,7 @@ enum dm_raid1_error {
45247
45248 struct mirror {
45249 struct mirror_set *ms;
45250- atomic_t error_count;
45251+ atomic_unchecked_t error_count;
45252 unsigned long error_type;
45253 struct dm_dev *dev;
45254 sector_t offset;
45255@@ -186,7 +186,7 @@ static struct mirror *get_valid_mirror(struct mirror_set *ms)
45256 struct mirror *m;
45257
45258 for (m = ms->mirror; m < ms->mirror + ms->nr_mirrors; m++)
45259- if (!atomic_read(&m->error_count))
45260+ if (!atomic_read_unchecked(&m->error_count))
45261 return m;
45262
45263 return NULL;
45264@@ -218,7 +218,7 @@ static void fail_mirror(struct mirror *m, enum dm_raid1_error error_type)
45265 * simple way to tell if a device has encountered
45266 * errors.
45267 */
45268- atomic_inc(&m->error_count);
45269+ atomic_inc_unchecked(&m->error_count);
45270
45271 if (test_and_set_bit(error_type, &m->error_type))
45272 return;
45273@@ -409,7 +409,7 @@ static struct mirror *choose_mirror(struct mirror_set *ms, sector_t sector)
45274 struct mirror *m = get_default_mirror(ms);
45275
45276 do {
45277- if (likely(!atomic_read(&m->error_count)))
45278+ if (likely(!atomic_read_unchecked(&m->error_count)))
45279 return m;
45280
45281 if (m-- == ms->mirror)
45282@@ -423,7 +423,7 @@ static int default_ok(struct mirror *m)
45283 {
45284 struct mirror *default_mirror = get_default_mirror(m->ms);
45285
45286- return !atomic_read(&default_mirror->error_count);
45287+ return !atomic_read_unchecked(&default_mirror->error_count);
45288 }
45289
45290 static int mirror_available(struct mirror_set *ms, struct bio *bio)
45291@@ -560,7 +560,7 @@ static void do_reads(struct mirror_set *ms, struct bio_list *reads)
45292 */
45293 if (likely(region_in_sync(ms, region, 1)))
45294 m = choose_mirror(ms, bio->bi_iter.bi_sector);
45295- else if (m && atomic_read(&m->error_count))
45296+ else if (m && atomic_read_unchecked(&m->error_count))
45297 m = NULL;
45298
45299 if (likely(m))
45300@@ -936,7 +936,7 @@ static int get_mirror(struct mirror_set *ms, struct dm_target *ti,
45301 }
45302
45303 ms->mirror[mirror].ms = ms;
45304- atomic_set(&(ms->mirror[mirror].error_count), 0);
45305+ atomic_set_unchecked(&(ms->mirror[mirror].error_count), 0);
45306 ms->mirror[mirror].error_type = 0;
45307 ms->mirror[mirror].offset = offset;
45308
45309@@ -1351,7 +1351,7 @@ static void mirror_resume(struct dm_target *ti)
45310 */
45311 static char device_status_char(struct mirror *m)
45312 {
45313- if (!atomic_read(&(m->error_count)))
45314+ if (!atomic_read_unchecked(&(m->error_count)))
45315 return 'A';
45316
45317 return (test_bit(DM_RAID1_FLUSH_ERROR, &(m->error_type))) ? 'F' :
45318diff --git a/drivers/md/dm-stats.c b/drivers/md/dm-stats.c
45319index f478a4c..4b8e5ef 100644
45320--- a/drivers/md/dm-stats.c
45321+++ b/drivers/md/dm-stats.c
45322@@ -382,7 +382,7 @@ do_sync_free:
45323 synchronize_rcu_expedited();
45324 dm_stat_free(&s->rcu_head);
45325 } else {
45326- ACCESS_ONCE(dm_stat_need_rcu_barrier) = 1;
45327+ ACCESS_ONCE_RW(dm_stat_need_rcu_barrier) = 1;
45328 call_rcu(&s->rcu_head, dm_stat_free);
45329 }
45330 return 0;
45331@@ -554,8 +554,8 @@ void dm_stats_account_io(struct dm_stats *stats, unsigned long bi_rw,
45332 ((bi_rw & (REQ_WRITE | REQ_DISCARD)) ==
45333 (ACCESS_ONCE(last->last_rw) & (REQ_WRITE | REQ_DISCARD)))
45334 ));
45335- ACCESS_ONCE(last->last_sector) = end_sector;
45336- ACCESS_ONCE(last->last_rw) = bi_rw;
45337+ ACCESS_ONCE_RW(last->last_sector) = end_sector;
45338+ ACCESS_ONCE_RW(last->last_rw) = bi_rw;
45339 }
45340
45341 rcu_read_lock();
45342diff --git a/drivers/md/dm-stripe.c b/drivers/md/dm-stripe.c
45343index f8b37d4..5c5cafd 100644
45344--- a/drivers/md/dm-stripe.c
45345+++ b/drivers/md/dm-stripe.c
45346@@ -21,7 +21,7 @@ struct stripe {
45347 struct dm_dev *dev;
45348 sector_t physical_start;
45349
45350- atomic_t error_count;
45351+ atomic_unchecked_t error_count;
45352 };
45353
45354 struct stripe_c {
45355@@ -188,7 +188,7 @@ static int stripe_ctr(struct dm_target *ti, unsigned int argc, char **argv)
45356 kfree(sc);
45357 return r;
45358 }
45359- atomic_set(&(sc->stripe[i].error_count), 0);
45360+ atomic_set_unchecked(&(sc->stripe[i].error_count), 0);
45361 }
45362
45363 ti->private = sc;
45364@@ -332,7 +332,7 @@ static void stripe_status(struct dm_target *ti, status_type_t type,
45365 DMEMIT("%d ", sc->stripes);
45366 for (i = 0; i < sc->stripes; i++) {
45367 DMEMIT("%s ", sc->stripe[i].dev->name);
45368- buffer[i] = atomic_read(&(sc->stripe[i].error_count)) ?
45369+ buffer[i] = atomic_read_unchecked(&(sc->stripe[i].error_count)) ?
45370 'D' : 'A';
45371 }
45372 buffer[i] = '\0';
45373@@ -377,8 +377,8 @@ static int stripe_end_io(struct dm_target *ti, struct bio *bio, int error)
45374 */
45375 for (i = 0; i < sc->stripes; i++)
45376 if (!strcmp(sc->stripe[i].dev->name, major_minor)) {
45377- atomic_inc(&(sc->stripe[i].error_count));
45378- if (atomic_read(&(sc->stripe[i].error_count)) <
45379+ atomic_inc_unchecked(&(sc->stripe[i].error_count));
45380+ if (atomic_read_unchecked(&(sc->stripe[i].error_count)) <
45381 DM_IO_ERROR_THRESHOLD)
45382 schedule_work(&sc->trigger_event);
45383 }
45384diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
45385index 6554d91..b0221c2 100644
45386--- a/drivers/md/dm-table.c
45387+++ b/drivers/md/dm-table.c
45388@@ -303,7 +303,7 @@ static int device_area_is_invalid(struct dm_target *ti, struct dm_dev *dev,
45389 if (!dev_size)
45390 return 0;
45391
45392- if ((start >= dev_size) || (start + len > dev_size)) {
45393+ if ((start >= dev_size) || (len > dev_size - start)) {
45394 DMWARN("%s: %s too small for target: "
45395 "start=%llu, len=%llu, dev_size=%llu",
45396 dm_device_name(ti->table->md), bdevname(bdev, b),
45397diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c
45398index 79f6941..b33b4e0 100644
45399--- a/drivers/md/dm-thin-metadata.c
45400+++ b/drivers/md/dm-thin-metadata.c
45401@@ -404,7 +404,7 @@ static void __setup_btree_details(struct dm_pool_metadata *pmd)
45402 {
45403 pmd->info.tm = pmd->tm;
45404 pmd->info.levels = 2;
45405- pmd->info.value_type.context = pmd->data_sm;
45406+ pmd->info.value_type.context = (dm_space_map_no_const *)pmd->data_sm;
45407 pmd->info.value_type.size = sizeof(__le64);
45408 pmd->info.value_type.inc = data_block_inc;
45409 pmd->info.value_type.dec = data_block_dec;
45410@@ -423,7 +423,7 @@ static void __setup_btree_details(struct dm_pool_metadata *pmd)
45411
45412 pmd->bl_info.tm = pmd->tm;
45413 pmd->bl_info.levels = 1;
45414- pmd->bl_info.value_type.context = pmd->data_sm;
45415+ pmd->bl_info.value_type.context = (dm_space_map_no_const *)pmd->data_sm;
45416 pmd->bl_info.value_type.size = sizeof(__le64);
45417 pmd->bl_info.value_type.inc = data_block_inc;
45418 pmd->bl_info.value_type.dec = data_block_dec;
45419diff --git a/drivers/md/dm.c b/drivers/md/dm.c
45420index 8001fe9..abdd0d0 100644
45421--- a/drivers/md/dm.c
45422+++ b/drivers/md/dm.c
45423@@ -188,9 +188,9 @@ struct mapped_device {
45424 /*
45425 * Event handling.
45426 */
45427- atomic_t event_nr;
45428+ atomic_unchecked_t event_nr;
45429 wait_queue_head_t eventq;
45430- atomic_t uevent_seq;
45431+ atomic_unchecked_t uevent_seq;
45432 struct list_head uevent_list;
45433 spinlock_t uevent_lock; /* Protect access to uevent_list */
45434
45435@@ -2163,8 +2163,8 @@ static struct mapped_device *alloc_dev(int minor)
45436 spin_lock_init(&md->deferred_lock);
45437 atomic_set(&md->holders, 1);
45438 atomic_set(&md->open_count, 0);
45439- atomic_set(&md->event_nr, 0);
45440- atomic_set(&md->uevent_seq, 0);
45441+ atomic_set_unchecked(&md->event_nr, 0);
45442+ atomic_set_unchecked(&md->uevent_seq, 0);
45443 INIT_LIST_HEAD(&md->uevent_list);
45444 INIT_LIST_HEAD(&md->table_devices);
45445 spin_lock_init(&md->uevent_lock);
45446@@ -2329,7 +2329,7 @@ static void event_callback(void *context)
45447
45448 dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj);
45449
45450- atomic_inc(&md->event_nr);
45451+ atomic_inc_unchecked(&md->event_nr);
45452 wake_up(&md->eventq);
45453 }
45454
45455@@ -3175,18 +3175,18 @@ int dm_kobject_uevent(struct mapped_device *md, enum kobject_action action,
45456
45457 uint32_t dm_next_uevent_seq(struct mapped_device *md)
45458 {
45459- return atomic_add_return(1, &md->uevent_seq);
45460+ return atomic_add_return_unchecked(1, &md->uevent_seq);
45461 }
45462
45463 uint32_t dm_get_event_nr(struct mapped_device *md)
45464 {
45465- return atomic_read(&md->event_nr);
45466+ return atomic_read_unchecked(&md->event_nr);
45467 }
45468
45469 int dm_wait_event(struct mapped_device *md, int event_nr)
45470 {
45471 return wait_event_interruptible(md->eventq,
45472- (event_nr != atomic_read(&md->event_nr)));
45473+ (event_nr != atomic_read_unchecked(&md->event_nr)));
45474 }
45475
45476 void dm_uevent_add(struct mapped_device *md, struct list_head *elist)
45477diff --git a/drivers/md/md.c b/drivers/md/md.c
45478index e47d1dd..ebc3480 100644
45479--- a/drivers/md/md.c
45480+++ b/drivers/md/md.c
45481@@ -191,10 +191,10 @@ EXPORT_SYMBOL_GPL(bio_clone_mddev);
45482 * start build, activate spare
45483 */
45484 static DECLARE_WAIT_QUEUE_HEAD(md_event_waiters);
45485-static atomic_t md_event_count;
45486+static atomic_unchecked_t md_event_count;
45487 void md_new_event(struct mddev *mddev)
45488 {
45489- atomic_inc(&md_event_count);
45490+ atomic_inc_unchecked(&md_event_count);
45491 wake_up(&md_event_waiters);
45492 }
45493 EXPORT_SYMBOL_GPL(md_new_event);
45494@@ -204,7 +204,7 @@ EXPORT_SYMBOL_GPL(md_new_event);
45495 */
45496 static void md_new_event_inintr(struct mddev *mddev)
45497 {
45498- atomic_inc(&md_event_count);
45499+ atomic_inc_unchecked(&md_event_count);
45500 wake_up(&md_event_waiters);
45501 }
45502
45503@@ -1442,7 +1442,7 @@ static int super_1_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_
45504 if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_RESHAPE_ACTIVE) &&
45505 (le32_to_cpu(sb->feature_map) & MD_FEATURE_NEW_OFFSET))
45506 rdev->new_data_offset += (s32)le32_to_cpu(sb->new_offset);
45507- atomic_set(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
45508+ atomic_set_unchecked(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
45509
45510 rdev->sb_size = le32_to_cpu(sb->max_dev) * 2 + 256;
45511 bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1;
45512@@ -1693,7 +1693,7 @@ static void super_1_sync(struct mddev *mddev, struct md_rdev *rdev)
45513 else
45514 sb->resync_offset = cpu_to_le64(0);
45515
45516- sb->cnt_corrected_read = cpu_to_le32(atomic_read(&rdev->corrected_errors));
45517+ sb->cnt_corrected_read = cpu_to_le32(atomic_read_unchecked(&rdev->corrected_errors));
45518
45519 sb->raid_disks = cpu_to_le32(mddev->raid_disks);
45520 sb->size = cpu_to_le64(mddev->dev_sectors);
45521@@ -2564,7 +2564,7 @@ __ATTR_PREALLOC(state, S_IRUGO|S_IWUSR, state_show, state_store);
45522 static ssize_t
45523 errors_show(struct md_rdev *rdev, char *page)
45524 {
45525- return sprintf(page, "%d\n", atomic_read(&rdev->corrected_errors));
45526+ return sprintf(page, "%d\n", atomic_read_unchecked(&rdev->corrected_errors));
45527 }
45528
45529 static ssize_t
45530@@ -2573,7 +2573,7 @@ errors_store(struct md_rdev *rdev, const char *buf, size_t len)
45531 char *e;
45532 unsigned long n = simple_strtoul(buf, &e, 10);
45533 if (*buf && (*e == 0 || *e == '\n')) {
45534- atomic_set(&rdev->corrected_errors, n);
45535+ atomic_set_unchecked(&rdev->corrected_errors, n);
45536 return len;
45537 }
45538 return -EINVAL;
45539@@ -3009,8 +3009,8 @@ int md_rdev_init(struct md_rdev *rdev)
45540 rdev->sb_loaded = 0;
45541 rdev->bb_page = NULL;
45542 atomic_set(&rdev->nr_pending, 0);
45543- atomic_set(&rdev->read_errors, 0);
45544- atomic_set(&rdev->corrected_errors, 0);
45545+ atomic_set_unchecked(&rdev->read_errors, 0);
45546+ atomic_set_unchecked(&rdev->corrected_errors, 0);
45547
45548 INIT_LIST_HEAD(&rdev->same_set);
45549 init_waitqueue_head(&rdev->blocked_wait);
45550@@ -7083,7 +7083,7 @@ static int md_seq_show(struct seq_file *seq, void *v)
45551
45552 spin_unlock(&pers_lock);
45553 seq_printf(seq, "\n");
45554- seq->poll_event = atomic_read(&md_event_count);
45555+ seq->poll_event = atomic_read_unchecked(&md_event_count);
45556 return 0;
45557 }
45558 if (v == (void*)2) {
45559@@ -7186,7 +7186,7 @@ static int md_seq_open(struct inode *inode, struct file *file)
45560 return error;
45561
45562 seq = file->private_data;
45563- seq->poll_event = atomic_read(&md_event_count);
45564+ seq->poll_event = atomic_read_unchecked(&md_event_count);
45565 return error;
45566 }
45567
45568@@ -7203,7 +7203,7 @@ static unsigned int mdstat_poll(struct file *filp, poll_table *wait)
45569 /* always allow read */
45570 mask = POLLIN | POLLRDNORM;
45571
45572- if (seq->poll_event != atomic_read(&md_event_count))
45573+ if (seq->poll_event != atomic_read_unchecked(&md_event_count))
45574 mask |= POLLERR | POLLPRI;
45575 return mask;
45576 }
45577@@ -7250,7 +7250,7 @@ static int is_mddev_idle(struct mddev *mddev, int init)
45578 struct gendisk *disk = rdev->bdev->bd_contains->bd_disk;
45579 curr_events = (int)part_stat_read(&disk->part0, sectors[0]) +
45580 (int)part_stat_read(&disk->part0, sectors[1]) -
45581- atomic_read(&disk->sync_io);
45582+ atomic_read_unchecked(&disk->sync_io);
45583 /* sync IO will cause sync_io to increase before the disk_stats
45584 * as sync_io is counted when a request starts, and
45585 * disk_stats is counted when it completes.
45586diff --git a/drivers/md/md.h b/drivers/md/md.h
45587index 318ca8f..31e4478 100644
45588--- a/drivers/md/md.h
45589+++ b/drivers/md/md.h
45590@@ -94,13 +94,13 @@ struct md_rdev {
45591 * only maintained for arrays that
45592 * support hot removal
45593 */
45594- atomic_t read_errors; /* number of consecutive read errors that
45595+ atomic_unchecked_t read_errors; /* number of consecutive read errors that
45596 * we have tried to ignore.
45597 */
45598 struct timespec last_read_error; /* monotonic time since our
45599 * last read error
45600 */
45601- atomic_t corrected_errors; /* number of corrected read errors,
45602+ atomic_unchecked_t corrected_errors; /* number of corrected read errors,
45603 * for reporting to userspace and storing
45604 * in superblock.
45605 */
45606@@ -476,7 +476,7 @@ extern void mddev_unlock(struct mddev *mddev);
45607
45608 static inline void md_sync_acct(struct block_device *bdev, unsigned long nr_sectors)
45609 {
45610- atomic_add(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
45611+ atomic_add_unchecked(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
45612 }
45613
45614 struct md_personality
45615diff --git a/drivers/md/persistent-data/dm-space-map-metadata.c b/drivers/md/persistent-data/dm-space-map-metadata.c
45616index e8a9042..35bd145 100644
45617--- a/drivers/md/persistent-data/dm-space-map-metadata.c
45618+++ b/drivers/md/persistent-data/dm-space-map-metadata.c
45619@@ -683,7 +683,7 @@ static int sm_metadata_extend(struct dm_space_map *sm, dm_block_t extra_blocks)
45620 * Flick into a mode where all blocks get allocated in the new area.
45621 */
45622 smm->begin = old_len;
45623- memcpy(sm, &bootstrap_ops, sizeof(*sm));
45624+ memcpy((void *)sm, &bootstrap_ops, sizeof(*sm));
45625
45626 /*
45627 * Extend.
45628@@ -714,7 +714,7 @@ out:
45629 /*
45630 * Switch back to normal behaviour.
45631 */
45632- memcpy(sm, &ops, sizeof(*sm));
45633+ memcpy((void *)sm, &ops, sizeof(*sm));
45634 return r;
45635 }
45636
45637diff --git a/drivers/md/persistent-data/dm-space-map.h b/drivers/md/persistent-data/dm-space-map.h
45638index 3e6d115..ffecdeb 100644
45639--- a/drivers/md/persistent-data/dm-space-map.h
45640+++ b/drivers/md/persistent-data/dm-space-map.h
45641@@ -71,6 +71,7 @@ struct dm_space_map {
45642 dm_sm_threshold_fn fn,
45643 void *context);
45644 };
45645+typedef struct dm_space_map __no_const dm_space_map_no_const;
45646
45647 /*----------------------------------------------------------------*/
45648
45649diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
45650index 3b5d7f7..903391c 100644
45651--- a/drivers/md/raid0.c
45652+++ b/drivers/md/raid0.c
45653@@ -517,6 +517,9 @@ static void raid0_make_request(struct mddev *mddev, struct bio *bio)
45654 ? (sector & (chunk_sects-1))
45655 : sector_div(sector, chunk_sects));
45656
45657+ /* Restore due to sector_div */
45658+ sector = bio->bi_iter.bi_sector;
45659+
45660 if (sectors < bio_sectors(bio)) {
45661 split = bio_split(bio, sectors, GFP_NOIO, fs_bio_set);
45662 bio_chain(split, bio);
45663@@ -524,7 +527,6 @@ static void raid0_make_request(struct mddev *mddev, struct bio *bio)
45664 split = bio;
45665 }
45666
45667- sector = bio->bi_iter.bi_sector;
45668 zone = find_zone(mddev->private, &sector);
45669 tmp_dev = map_sector(mddev, zone, sector, &sector);
45670 split->bi_bdev = tmp_dev->bdev;
45671diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
45672index d34e238..34f8d98 100644
45673--- a/drivers/md/raid1.c
45674+++ b/drivers/md/raid1.c
45675@@ -1922,7 +1922,7 @@ static int fix_sync_read_error(struct r1bio *r1_bio)
45676 if (r1_sync_page_io(rdev, sect, s,
45677 bio->bi_io_vec[idx].bv_page,
45678 READ) != 0)
45679- atomic_add(s, &rdev->corrected_errors);
45680+ atomic_add_unchecked(s, &rdev->corrected_errors);
45681 }
45682 sectors -= s;
45683 sect += s;
45684@@ -2155,7 +2155,7 @@ static void fix_read_error(struct r1conf *conf, int read_disk,
45685 !test_bit(Faulty, &rdev->flags)) {
45686 if (r1_sync_page_io(rdev, sect, s,
45687 conf->tmppage, READ)) {
45688- atomic_add(s, &rdev->corrected_errors);
45689+ atomic_add_unchecked(s, &rdev->corrected_errors);
45690 printk(KERN_INFO
45691 "md/raid1:%s: read error corrected "
45692 "(%d sectors at %llu on %s)\n",
45693diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
45694index a7196c4..439f012 100644
45695--- a/drivers/md/raid10.c
45696+++ b/drivers/md/raid10.c
45697@@ -1934,7 +1934,7 @@ static void end_sync_read(struct bio *bio, int error)
45698 /* The write handler will notice the lack of
45699 * R10BIO_Uptodate and record any errors etc
45700 */
45701- atomic_add(r10_bio->sectors,
45702+ atomic_add_unchecked(r10_bio->sectors,
45703 &conf->mirrors[d].rdev->corrected_errors);
45704
45705 /* for reconstruct, we always reschedule after a read.
45706@@ -2291,7 +2291,7 @@ static void check_decay_read_errors(struct mddev *mddev, struct md_rdev *rdev)
45707 {
45708 struct timespec cur_time_mon;
45709 unsigned long hours_since_last;
45710- unsigned int read_errors = atomic_read(&rdev->read_errors);
45711+ unsigned int read_errors = atomic_read_unchecked(&rdev->read_errors);
45712
45713 ktime_get_ts(&cur_time_mon);
45714
45715@@ -2313,9 +2313,9 @@ static void check_decay_read_errors(struct mddev *mddev, struct md_rdev *rdev)
45716 * overflowing the shift of read_errors by hours_since_last.
45717 */
45718 if (hours_since_last >= 8 * sizeof(read_errors))
45719- atomic_set(&rdev->read_errors, 0);
45720+ atomic_set_unchecked(&rdev->read_errors, 0);
45721 else
45722- atomic_set(&rdev->read_errors, read_errors >> hours_since_last);
45723+ atomic_set_unchecked(&rdev->read_errors, read_errors >> hours_since_last);
45724 }
45725
45726 static int r10_sync_page_io(struct md_rdev *rdev, sector_t sector,
45727@@ -2369,8 +2369,8 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
45728 return;
45729
45730 check_decay_read_errors(mddev, rdev);
45731- atomic_inc(&rdev->read_errors);
45732- if (atomic_read(&rdev->read_errors) > max_read_errors) {
45733+ atomic_inc_unchecked(&rdev->read_errors);
45734+ if (atomic_read_unchecked(&rdev->read_errors) > max_read_errors) {
45735 char b[BDEVNAME_SIZE];
45736 bdevname(rdev->bdev, b);
45737
45738@@ -2378,7 +2378,7 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
45739 "md/raid10:%s: %s: Raid device exceeded "
45740 "read_error threshold [cur %d:max %d]\n",
45741 mdname(mddev), b,
45742- atomic_read(&rdev->read_errors), max_read_errors);
45743+ atomic_read_unchecked(&rdev->read_errors), max_read_errors);
45744 printk(KERN_NOTICE
45745 "md/raid10:%s: %s: Failing raid device\n",
45746 mdname(mddev), b);
45747@@ -2533,7 +2533,7 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
45748 sect +
45749 choose_data_offset(r10_bio, rdev)),
45750 bdevname(rdev->bdev, b));
45751- atomic_add(s, &rdev->corrected_errors);
45752+ atomic_add_unchecked(s, &rdev->corrected_errors);
45753 }
45754
45755 rdev_dec_pending(rdev, mddev);
45756diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
45757index cd2f96b..3876e63 100644
45758--- a/drivers/md/raid5.c
45759+++ b/drivers/md/raid5.c
45760@@ -947,23 +947,23 @@ async_copy_data(int frombio, struct bio *bio, struct page **page,
45761 struct bio_vec bvl;
45762 struct bvec_iter iter;
45763 struct page *bio_page;
45764- int page_offset;
45765+ s64 page_offset;
45766 struct async_submit_ctl submit;
45767 enum async_tx_flags flags = 0;
45768
45769 if (bio->bi_iter.bi_sector >= sector)
45770- page_offset = (signed)(bio->bi_iter.bi_sector - sector) * 512;
45771+ page_offset = (s64)(bio->bi_iter.bi_sector - sector) * 512;
45772 else
45773- page_offset = (signed)(sector - bio->bi_iter.bi_sector) * -512;
45774+ page_offset = (s64)(sector - bio->bi_iter.bi_sector) * -512;
45775
45776 if (frombio)
45777 flags |= ASYNC_TX_FENCE;
45778 init_async_submit(&submit, flags, tx, NULL, NULL, NULL);
45779
45780 bio_for_each_segment(bvl, bio, iter) {
45781- int len = bvl.bv_len;
45782- int clen;
45783- int b_offset = 0;
45784+ s64 len = bvl.bv_len;
45785+ s64 clen;
45786+ s64 b_offset = 0;
45787
45788 if (page_offset < 0) {
45789 b_offset = -page_offset;
45790@@ -1727,6 +1727,10 @@ static int grow_one_stripe(struct r5conf *conf, int hash)
45791 return 1;
45792 }
45793
45794+#ifdef CONFIG_GRKERNSEC_HIDESYM
45795+static atomic_unchecked_t raid5_cache_id = ATOMIC_INIT(0);
45796+#endif
45797+
45798 static int grow_stripes(struct r5conf *conf, int num)
45799 {
45800 struct kmem_cache *sc;
45801@@ -1738,7 +1742,11 @@ static int grow_stripes(struct r5conf *conf, int num)
45802 "raid%d-%s", conf->level, mdname(conf->mddev));
45803 else
45804 sprintf(conf->cache_name[0],
45805+#ifdef CONFIG_GRKERNSEC_HIDESYM
45806+ "raid%d-%08lx", conf->level, atomic_inc_return_unchecked(&raid5_cache_id));
45807+#else
45808 "raid%d-%p", conf->level, conf->mddev);
45809+#endif
45810 sprintf(conf->cache_name[1], "%s-alt", conf->cache_name[0]);
45811
45812 conf->active_name = 0;
45813@@ -2014,21 +2022,21 @@ static void raid5_end_read_request(struct bio * bi, int error)
45814 mdname(conf->mddev), STRIPE_SECTORS,
45815 (unsigned long long)s,
45816 bdevname(rdev->bdev, b));
45817- atomic_add(STRIPE_SECTORS, &rdev->corrected_errors);
45818+ atomic_add_unchecked(STRIPE_SECTORS, &rdev->corrected_errors);
45819 clear_bit(R5_ReadError, &sh->dev[i].flags);
45820 clear_bit(R5_ReWrite, &sh->dev[i].flags);
45821 } else if (test_bit(R5_ReadNoMerge, &sh->dev[i].flags))
45822 clear_bit(R5_ReadNoMerge, &sh->dev[i].flags);
45823
45824- if (atomic_read(&rdev->read_errors))
45825- atomic_set(&rdev->read_errors, 0);
45826+ if (atomic_read_unchecked(&rdev->read_errors))
45827+ atomic_set_unchecked(&rdev->read_errors, 0);
45828 } else {
45829 const char *bdn = bdevname(rdev->bdev, b);
45830 int retry = 0;
45831 int set_bad = 0;
45832
45833 clear_bit(R5_UPTODATE, &sh->dev[i].flags);
45834- atomic_inc(&rdev->read_errors);
45835+ atomic_inc_unchecked(&rdev->read_errors);
45836 if (test_bit(R5_ReadRepl, &sh->dev[i].flags))
45837 printk_ratelimited(
45838 KERN_WARNING
45839@@ -2056,7 +2064,7 @@ static void raid5_end_read_request(struct bio * bi, int error)
45840 mdname(conf->mddev),
45841 (unsigned long long)s,
45842 bdn);
45843- } else if (atomic_read(&rdev->read_errors)
45844+ } else if (atomic_read_unchecked(&rdev->read_errors)
45845 > conf->max_nr_stripes)
45846 printk(KERN_WARNING
45847 "md/raid:%s: Too many read errors, failing device %s.\n",
45848diff --git a/drivers/media/dvb-core/dvbdev.c b/drivers/media/dvb-core/dvbdev.c
45849index 983db75..ef9248c 100644
45850--- a/drivers/media/dvb-core/dvbdev.c
45851+++ b/drivers/media/dvb-core/dvbdev.c
45852@@ -185,7 +185,7 @@ int dvb_register_device(struct dvb_adapter *adap, struct dvb_device **pdvbdev,
45853 const struct dvb_device *template, void *priv, int type)
45854 {
45855 struct dvb_device *dvbdev;
45856- struct file_operations *dvbdevfops;
45857+ file_operations_no_const *dvbdevfops;
45858 struct device *clsdev;
45859 int minor;
45860 int id;
45861diff --git a/drivers/media/dvb-frontends/af9033.h b/drivers/media/dvb-frontends/af9033.h
45862index 6ad22b6..6e90e2a 100644
45863--- a/drivers/media/dvb-frontends/af9033.h
45864+++ b/drivers/media/dvb-frontends/af9033.h
45865@@ -96,6 +96,6 @@ struct af9033_ops {
45866 int (*pid_filter_ctrl)(struct dvb_frontend *fe, int onoff);
45867 int (*pid_filter)(struct dvb_frontend *fe, int index, u16 pid,
45868 int onoff);
45869-};
45870+} __no_const;
45871
45872 #endif /* AF9033_H */
45873diff --git a/drivers/media/dvb-frontends/dib3000.h b/drivers/media/dvb-frontends/dib3000.h
45874index 9b6c3bb..baeb5c7 100644
45875--- a/drivers/media/dvb-frontends/dib3000.h
45876+++ b/drivers/media/dvb-frontends/dib3000.h
45877@@ -39,7 +39,7 @@ struct dib_fe_xfer_ops
45878 int (*fifo_ctrl)(struct dvb_frontend *fe, int onoff);
45879 int (*pid_ctrl)(struct dvb_frontend *fe, int index, int pid, int onoff);
45880 int (*tuner_pass_ctrl)(struct dvb_frontend *fe, int onoff, u8 pll_ctrl);
45881-};
45882+} __no_const;
45883
45884 #if IS_ENABLED(CONFIG_DVB_DIB3000MB)
45885 extern struct dvb_frontend* dib3000mb_attach(const struct dib3000_config* config,
45886diff --git a/drivers/media/dvb-frontends/dib7000p.h b/drivers/media/dvb-frontends/dib7000p.h
45887index 1fea0e9..321ce8f 100644
45888--- a/drivers/media/dvb-frontends/dib7000p.h
45889+++ b/drivers/media/dvb-frontends/dib7000p.h
45890@@ -64,7 +64,7 @@ struct dib7000p_ops {
45891 int (*get_adc_power)(struct dvb_frontend *fe);
45892 int (*slave_reset)(struct dvb_frontend *fe);
45893 struct dvb_frontend *(*init)(struct i2c_adapter *i2c_adap, u8 i2c_addr, struct dib7000p_config *cfg);
45894-};
45895+} __no_const;
45896
45897 #if IS_ENABLED(CONFIG_DVB_DIB7000P)
45898 void *dib7000p_attach(struct dib7000p_ops *ops);
45899diff --git a/drivers/media/dvb-frontends/dib8000.h b/drivers/media/dvb-frontends/dib8000.h
45900index 84cc103..5780c54 100644
45901--- a/drivers/media/dvb-frontends/dib8000.h
45902+++ b/drivers/media/dvb-frontends/dib8000.h
45903@@ -61,7 +61,7 @@ struct dib8000_ops {
45904 int (*pid_filter_ctrl)(struct dvb_frontend *fe, u8 onoff);
45905 int (*pid_filter)(struct dvb_frontend *fe, u8 id, u16 pid, u8 onoff);
45906 struct dvb_frontend *(*init)(struct i2c_adapter *i2c_adap, u8 i2c_addr, struct dib8000_config *cfg);
45907-};
45908+} __no_const;
45909
45910 #if IS_ENABLED(CONFIG_DVB_DIB8000)
45911 void *dib8000_attach(struct dib8000_ops *ops);
45912diff --git a/drivers/media/pci/cx88/cx88-video.c b/drivers/media/pci/cx88/cx88-video.c
45913index 860c98fc..497fa25 100644
45914--- a/drivers/media/pci/cx88/cx88-video.c
45915+++ b/drivers/media/pci/cx88/cx88-video.c
45916@@ -50,9 +50,9 @@ MODULE_VERSION(CX88_VERSION);
45917
45918 /* ------------------------------------------------------------------ */
45919
45920-static unsigned int video_nr[] = {[0 ... (CX88_MAXBOARDS - 1)] = UNSET };
45921-static unsigned int vbi_nr[] = {[0 ... (CX88_MAXBOARDS - 1)] = UNSET };
45922-static unsigned int radio_nr[] = {[0 ... (CX88_MAXBOARDS - 1)] = UNSET };
45923+static int video_nr[] = {[0 ... (CX88_MAXBOARDS - 1)] = UNSET };
45924+static int vbi_nr[] = {[0 ... (CX88_MAXBOARDS - 1)] = UNSET };
45925+static int radio_nr[] = {[0 ... (CX88_MAXBOARDS - 1)] = UNSET };
45926
45927 module_param_array(video_nr, int, NULL, 0444);
45928 module_param_array(vbi_nr, int, NULL, 0444);
45929diff --git a/drivers/media/pci/ivtv/ivtv-driver.c b/drivers/media/pci/ivtv/ivtv-driver.c
45930index 802642d..5534900 100644
45931--- a/drivers/media/pci/ivtv/ivtv-driver.c
45932+++ b/drivers/media/pci/ivtv/ivtv-driver.c
45933@@ -83,7 +83,7 @@ static struct pci_device_id ivtv_pci_tbl[] = {
45934 MODULE_DEVICE_TABLE(pci,ivtv_pci_tbl);
45935
45936 /* ivtv instance counter */
45937-static atomic_t ivtv_instance = ATOMIC_INIT(0);
45938+static atomic_unchecked_t ivtv_instance = ATOMIC_INIT(0);
45939
45940 /* Parameter declarations */
45941 static int cardtype[IVTV_MAX_CARDS];
45942diff --git a/drivers/media/pci/solo6x10/solo6x10-core.c b/drivers/media/pci/solo6x10/solo6x10-core.c
45943index 570d119..ed25830 100644
45944--- a/drivers/media/pci/solo6x10/solo6x10-core.c
45945+++ b/drivers/media/pci/solo6x10/solo6x10-core.c
45946@@ -424,7 +424,7 @@ static void solo_device_release(struct device *dev)
45947
45948 static int solo_sysfs_init(struct solo_dev *solo_dev)
45949 {
45950- struct bin_attribute *sdram_attr = &solo_dev->sdram_attr;
45951+ bin_attribute_no_const *sdram_attr = &solo_dev->sdram_attr;
45952 struct device *dev = &solo_dev->dev;
45953 const char *driver;
45954 int i;
45955diff --git a/drivers/media/pci/solo6x10/solo6x10-g723.c b/drivers/media/pci/solo6x10/solo6x10-g723.c
45956index 7ddc767..1c24361 100644
45957--- a/drivers/media/pci/solo6x10/solo6x10-g723.c
45958+++ b/drivers/media/pci/solo6x10/solo6x10-g723.c
45959@@ -351,7 +351,7 @@ static int solo_snd_pcm_init(struct solo_dev *solo_dev)
45960
45961 int solo_g723_init(struct solo_dev *solo_dev)
45962 {
45963- static struct snd_device_ops ops = { NULL };
45964+ static struct snd_device_ops ops = { };
45965 struct snd_card *card;
45966 struct snd_kcontrol_new kctl;
45967 char name[32];
45968diff --git a/drivers/media/pci/solo6x10/solo6x10-p2m.c b/drivers/media/pci/solo6x10/solo6x10-p2m.c
45969index 8c84846..27b4f83 100644
45970--- a/drivers/media/pci/solo6x10/solo6x10-p2m.c
45971+++ b/drivers/media/pci/solo6x10/solo6x10-p2m.c
45972@@ -73,7 +73,7 @@ int solo_p2m_dma_desc(struct solo_dev *solo_dev,
45973
45974 /* Get next ID. According to Softlogic, 6110 has problems on !=0 P2M */
45975 if (solo_dev->type != SOLO_DEV_6110 && multi_p2m) {
45976- p2m_id = atomic_inc_return(&solo_dev->p2m_count) % SOLO_NR_P2M;
45977+ p2m_id = atomic_inc_return_unchecked(&solo_dev->p2m_count) % SOLO_NR_P2M;
45978 if (p2m_id < 0)
45979 p2m_id = -p2m_id;
45980 }
45981diff --git a/drivers/media/pci/solo6x10/solo6x10.h b/drivers/media/pci/solo6x10/solo6x10.h
45982index 1ca54b0..7d7cb9a 100644
45983--- a/drivers/media/pci/solo6x10/solo6x10.h
45984+++ b/drivers/media/pci/solo6x10/solo6x10.h
45985@@ -218,7 +218,7 @@ struct solo_dev {
45986
45987 /* P2M DMA Engine */
45988 struct solo_p2m_dev p2m_dev[SOLO_NR_P2M];
45989- atomic_t p2m_count;
45990+ atomic_unchecked_t p2m_count;
45991 int p2m_jiffies;
45992 unsigned int p2m_timeouts;
45993
45994diff --git a/drivers/media/pci/tw68/tw68-core.c b/drivers/media/pci/tw68/tw68-core.c
45995index c135165..dc69499 100644
45996--- a/drivers/media/pci/tw68/tw68-core.c
45997+++ b/drivers/media/pci/tw68/tw68-core.c
45998@@ -60,7 +60,7 @@ static unsigned int card[] = {[0 ... (TW68_MAXBOARDS - 1)] = UNSET };
45999 module_param_array(card, int, NULL, 0444);
46000 MODULE_PARM_DESC(card, "card type");
46001
46002-static atomic_t tw68_instance = ATOMIC_INIT(0);
46003+static atomic_unchecked_t tw68_instance = ATOMIC_INIT(0);
46004
46005 /* ------------------------------------------------------------------ */
46006
46007diff --git a/drivers/media/platform/omap/omap_vout.c b/drivers/media/platform/omap/omap_vout.c
46008index ba2d8f9..1566684 100644
46009--- a/drivers/media/platform/omap/omap_vout.c
46010+++ b/drivers/media/platform/omap/omap_vout.c
46011@@ -63,7 +63,6 @@ enum omap_vout_channels {
46012 OMAP_VIDEO2,
46013 };
46014
46015-static struct videobuf_queue_ops video_vbq_ops;
46016 /* Variables configurable through module params*/
46017 static u32 video1_numbuffers = 3;
46018 static u32 video2_numbuffers = 3;
46019@@ -1012,6 +1011,12 @@ static int omap_vout_open(struct file *file)
46020 {
46021 struct videobuf_queue *q;
46022 struct omap_vout_device *vout = NULL;
46023+ static struct videobuf_queue_ops video_vbq_ops = {
46024+ .buf_setup = omap_vout_buffer_setup,
46025+ .buf_prepare = omap_vout_buffer_prepare,
46026+ .buf_release = omap_vout_buffer_release,
46027+ .buf_queue = omap_vout_buffer_queue,
46028+ };
46029
46030 vout = video_drvdata(file);
46031 v4l2_dbg(1, debug, &vout->vid_dev->v4l2_dev, "Entering %s\n", __func__);
46032@@ -1029,10 +1034,6 @@ static int omap_vout_open(struct file *file)
46033 vout->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
46034
46035 q = &vout->vbq;
46036- video_vbq_ops.buf_setup = omap_vout_buffer_setup;
46037- video_vbq_ops.buf_prepare = omap_vout_buffer_prepare;
46038- video_vbq_ops.buf_release = omap_vout_buffer_release;
46039- video_vbq_ops.buf_queue = omap_vout_buffer_queue;
46040 spin_lock_init(&vout->vbq_lock);
46041
46042 videobuf_queue_dma_contig_init(q, &video_vbq_ops, q->dev,
46043diff --git a/drivers/media/platform/s5p-tv/mixer.h b/drivers/media/platform/s5p-tv/mixer.h
46044index fb2acc5..a2fcbdc4 100644
46045--- a/drivers/media/platform/s5p-tv/mixer.h
46046+++ b/drivers/media/platform/s5p-tv/mixer.h
46047@@ -156,7 +156,7 @@ struct mxr_layer {
46048 /** layer index (unique identifier) */
46049 int idx;
46050 /** callbacks for layer methods */
46051- struct mxr_layer_ops ops;
46052+ struct mxr_layer_ops *ops;
46053 /** format array */
46054 const struct mxr_format **fmt_array;
46055 /** size of format array */
46056diff --git a/drivers/media/platform/s5p-tv/mixer_grp_layer.c b/drivers/media/platform/s5p-tv/mixer_grp_layer.c
46057index 74344c7..a39e70e 100644
46058--- a/drivers/media/platform/s5p-tv/mixer_grp_layer.c
46059+++ b/drivers/media/platform/s5p-tv/mixer_grp_layer.c
46060@@ -235,7 +235,7 @@ struct mxr_layer *mxr_graph_layer_create(struct mxr_device *mdev, int idx)
46061 {
46062 struct mxr_layer *layer;
46063 int ret;
46064- struct mxr_layer_ops ops = {
46065+ static struct mxr_layer_ops ops = {
46066 .release = mxr_graph_layer_release,
46067 .buffer_set = mxr_graph_buffer_set,
46068 .stream_set = mxr_graph_stream_set,
46069diff --git a/drivers/media/platform/s5p-tv/mixer_reg.c b/drivers/media/platform/s5p-tv/mixer_reg.c
46070index b713403..53cb5ad 100644
46071--- a/drivers/media/platform/s5p-tv/mixer_reg.c
46072+++ b/drivers/media/platform/s5p-tv/mixer_reg.c
46073@@ -276,7 +276,7 @@ static void mxr_irq_layer_handle(struct mxr_layer *layer)
46074 layer->update_buf = next;
46075 }
46076
46077- layer->ops.buffer_set(layer, layer->update_buf);
46078+ layer->ops->buffer_set(layer, layer->update_buf);
46079
46080 if (done && done != layer->shadow_buf)
46081 vb2_buffer_done(&done->vb, VB2_BUF_STATE_DONE);
46082diff --git a/drivers/media/platform/s5p-tv/mixer_video.c b/drivers/media/platform/s5p-tv/mixer_video.c
46083index 72d4f2e..4b2ea0d 100644
46084--- a/drivers/media/platform/s5p-tv/mixer_video.c
46085+++ b/drivers/media/platform/s5p-tv/mixer_video.c
46086@@ -210,7 +210,7 @@ static void mxr_layer_default_geo(struct mxr_layer *layer)
46087 layer->geo.src.height = layer->geo.src.full_height;
46088
46089 mxr_geometry_dump(mdev, &layer->geo);
46090- layer->ops.fix_geometry(layer, MXR_GEOMETRY_SINK, 0);
46091+ layer->ops->fix_geometry(layer, MXR_GEOMETRY_SINK, 0);
46092 mxr_geometry_dump(mdev, &layer->geo);
46093 }
46094
46095@@ -228,7 +228,7 @@ static void mxr_layer_update_output(struct mxr_layer *layer)
46096 layer->geo.dst.full_width = mbus_fmt.width;
46097 layer->geo.dst.full_height = mbus_fmt.height;
46098 layer->geo.dst.field = mbus_fmt.field;
46099- layer->ops.fix_geometry(layer, MXR_GEOMETRY_SINK, 0);
46100+ layer->ops->fix_geometry(layer, MXR_GEOMETRY_SINK, 0);
46101
46102 mxr_geometry_dump(mdev, &layer->geo);
46103 }
46104@@ -334,7 +334,7 @@ static int mxr_s_fmt(struct file *file, void *priv,
46105 /* set source size to highest accepted value */
46106 geo->src.full_width = max(geo->dst.full_width, pix->width);
46107 geo->src.full_height = max(geo->dst.full_height, pix->height);
46108- layer->ops.fix_geometry(layer, MXR_GEOMETRY_SOURCE, 0);
46109+ layer->ops->fix_geometry(layer, MXR_GEOMETRY_SOURCE, 0);
46110 mxr_geometry_dump(mdev, &layer->geo);
46111 /* set cropping to total visible screen */
46112 geo->src.width = pix->width;
46113@@ -342,12 +342,12 @@ static int mxr_s_fmt(struct file *file, void *priv,
46114 geo->src.x_offset = 0;
46115 geo->src.y_offset = 0;
46116 /* assure consistency of geometry */
46117- layer->ops.fix_geometry(layer, MXR_GEOMETRY_CROP, MXR_NO_OFFSET);
46118+ layer->ops->fix_geometry(layer, MXR_GEOMETRY_CROP, MXR_NO_OFFSET);
46119 mxr_geometry_dump(mdev, &layer->geo);
46120 /* set full size to lowest possible value */
46121 geo->src.full_width = 0;
46122 geo->src.full_height = 0;
46123- layer->ops.fix_geometry(layer, MXR_GEOMETRY_SOURCE, 0);
46124+ layer->ops->fix_geometry(layer, MXR_GEOMETRY_SOURCE, 0);
46125 mxr_geometry_dump(mdev, &layer->geo);
46126
46127 /* returning results */
46128@@ -474,7 +474,7 @@ static int mxr_s_selection(struct file *file, void *fh,
46129 target->width = s->r.width;
46130 target->height = s->r.height;
46131
46132- layer->ops.fix_geometry(layer, stage, s->flags);
46133+ layer->ops->fix_geometry(layer, stage, s->flags);
46134
46135 /* retrieve update selection rectangle */
46136 res.left = target->x_offset;
46137@@ -938,13 +938,13 @@ static int start_streaming(struct vb2_queue *vq, unsigned int count)
46138 mxr_output_get(mdev);
46139
46140 mxr_layer_update_output(layer);
46141- layer->ops.format_set(layer);
46142+ layer->ops->format_set(layer);
46143 /* enabling layer in hardware */
46144 spin_lock_irqsave(&layer->enq_slock, flags);
46145 layer->state = MXR_LAYER_STREAMING;
46146 spin_unlock_irqrestore(&layer->enq_slock, flags);
46147
46148- layer->ops.stream_set(layer, MXR_ENABLE);
46149+ layer->ops->stream_set(layer, MXR_ENABLE);
46150 mxr_streamer_get(mdev);
46151
46152 return 0;
46153@@ -1014,7 +1014,7 @@ static void stop_streaming(struct vb2_queue *vq)
46154 spin_unlock_irqrestore(&layer->enq_slock, flags);
46155
46156 /* disabling layer in hardware */
46157- layer->ops.stream_set(layer, MXR_DISABLE);
46158+ layer->ops->stream_set(layer, MXR_DISABLE);
46159 /* remove one streamer */
46160 mxr_streamer_put(mdev);
46161 /* allow changes in output configuration */
46162@@ -1052,8 +1052,8 @@ void mxr_base_layer_unregister(struct mxr_layer *layer)
46163
46164 void mxr_layer_release(struct mxr_layer *layer)
46165 {
46166- if (layer->ops.release)
46167- layer->ops.release(layer);
46168+ if (layer->ops->release)
46169+ layer->ops->release(layer);
46170 }
46171
46172 void mxr_base_layer_release(struct mxr_layer *layer)
46173@@ -1079,7 +1079,7 @@ struct mxr_layer *mxr_base_layer_create(struct mxr_device *mdev,
46174
46175 layer->mdev = mdev;
46176 layer->idx = idx;
46177- layer->ops = *ops;
46178+ layer->ops = ops;
46179
46180 spin_lock_init(&layer->enq_slock);
46181 INIT_LIST_HEAD(&layer->enq_list);
46182diff --git a/drivers/media/platform/s5p-tv/mixer_vp_layer.c b/drivers/media/platform/s5p-tv/mixer_vp_layer.c
46183index c9388c4..ce71ece 100644
46184--- a/drivers/media/platform/s5p-tv/mixer_vp_layer.c
46185+++ b/drivers/media/platform/s5p-tv/mixer_vp_layer.c
46186@@ -206,7 +206,7 @@ struct mxr_layer *mxr_vp_layer_create(struct mxr_device *mdev, int idx)
46187 {
46188 struct mxr_layer *layer;
46189 int ret;
46190- struct mxr_layer_ops ops = {
46191+ static struct mxr_layer_ops ops = {
46192 .release = mxr_vp_layer_release,
46193 .buffer_set = mxr_vp_buffer_set,
46194 .stream_set = mxr_vp_stream_set,
46195diff --git a/drivers/media/radio/radio-cadet.c b/drivers/media/radio/radio-cadet.c
46196index 82affae..42833ec 100644
46197--- a/drivers/media/radio/radio-cadet.c
46198+++ b/drivers/media/radio/radio-cadet.c
46199@@ -333,6 +333,8 @@ static ssize_t cadet_read(struct file *file, char __user *data, size_t count, lo
46200 unsigned char readbuf[RDS_BUFFER];
46201 int i = 0;
46202
46203+ if (count > RDS_BUFFER)
46204+ return -EFAULT;
46205 mutex_lock(&dev->lock);
46206 if (dev->rdsstat == 0)
46207 cadet_start_rds(dev);
46208@@ -349,8 +351,9 @@ static ssize_t cadet_read(struct file *file, char __user *data, size_t count, lo
46209 readbuf[i++] = dev->rdsbuf[dev->rdsout++];
46210 mutex_unlock(&dev->lock);
46211
46212- if (i && copy_to_user(data, readbuf, i))
46213- return -EFAULT;
46214+ if (i > sizeof(readbuf) || (i && copy_to_user(data, readbuf, i)))
46215+ i = -EFAULT;
46216+
46217 return i;
46218 }
46219
46220diff --git a/drivers/media/radio/radio-maxiradio.c b/drivers/media/radio/radio-maxiradio.c
46221index 5236035..c622c74 100644
46222--- a/drivers/media/radio/radio-maxiradio.c
46223+++ b/drivers/media/radio/radio-maxiradio.c
46224@@ -61,7 +61,7 @@ MODULE_PARM_DESC(radio_nr, "Radio device number");
46225 /* TEA5757 pin mappings */
46226 static const int clk = 1, data = 2, wren = 4, mo_st = 8, power = 16;
46227
46228-static atomic_t maxiradio_instance = ATOMIC_INIT(0);
46229+static atomic_unchecked_t maxiradio_instance = ATOMIC_INIT(0);
46230
46231 #define PCI_VENDOR_ID_GUILLEMOT 0x5046
46232 #define PCI_DEVICE_ID_GUILLEMOT_MAXIRADIO 0x1001
46233diff --git a/drivers/media/radio/radio-shark.c b/drivers/media/radio/radio-shark.c
46234index 050b3bb..79f62b9 100644
46235--- a/drivers/media/radio/radio-shark.c
46236+++ b/drivers/media/radio/radio-shark.c
46237@@ -79,7 +79,7 @@ struct shark_device {
46238 u32 last_val;
46239 };
46240
46241-static atomic_t shark_instance = ATOMIC_INIT(0);
46242+static atomic_unchecked_t shark_instance = ATOMIC_INIT(0);
46243
46244 static void shark_write_val(struct snd_tea575x *tea, u32 val)
46245 {
46246diff --git a/drivers/media/radio/radio-shark2.c b/drivers/media/radio/radio-shark2.c
46247index 8654e0d..0608a64 100644
46248--- a/drivers/media/radio/radio-shark2.c
46249+++ b/drivers/media/radio/radio-shark2.c
46250@@ -74,7 +74,7 @@ struct shark_device {
46251 u8 *transfer_buffer;
46252 };
46253
46254-static atomic_t shark_instance = ATOMIC_INIT(0);
46255+static atomic_unchecked_t shark_instance = ATOMIC_INIT(0);
46256
46257 static int shark_write_reg(struct radio_tea5777 *tea, u64 reg)
46258 {
46259diff --git a/drivers/media/radio/radio-si476x.c b/drivers/media/radio/radio-si476x.c
46260index dccf586..d5db411 100644
46261--- a/drivers/media/radio/radio-si476x.c
46262+++ b/drivers/media/radio/radio-si476x.c
46263@@ -1445,7 +1445,7 @@ static int si476x_radio_probe(struct platform_device *pdev)
46264 struct si476x_radio *radio;
46265 struct v4l2_ctrl *ctrl;
46266
46267- static atomic_t instance = ATOMIC_INIT(0);
46268+ static atomic_unchecked_t instance = ATOMIC_INIT(0);
46269
46270 radio = devm_kzalloc(&pdev->dev, sizeof(*radio), GFP_KERNEL);
46271 if (!radio)
46272diff --git a/drivers/media/radio/wl128x/fmdrv_common.c b/drivers/media/radio/wl128x/fmdrv_common.c
46273index 704397f..4d05977 100644
46274--- a/drivers/media/radio/wl128x/fmdrv_common.c
46275+++ b/drivers/media/radio/wl128x/fmdrv_common.c
46276@@ -71,7 +71,7 @@ module_param(default_rds_buf, uint, 0444);
46277 MODULE_PARM_DESC(rds_buf, "RDS buffer entries");
46278
46279 /* Radio Nr */
46280-static u32 radio_nr = -1;
46281+static int radio_nr = -1;
46282 module_param(radio_nr, int, 0444);
46283 MODULE_PARM_DESC(radio_nr, "Radio Nr");
46284
46285diff --git a/drivers/media/usb/dvb-usb/cinergyT2-core.c b/drivers/media/usb/dvb-usb/cinergyT2-core.c
46286index 9fd1527..8927230 100644
46287--- a/drivers/media/usb/dvb-usb/cinergyT2-core.c
46288+++ b/drivers/media/usb/dvb-usb/cinergyT2-core.c
46289@@ -50,29 +50,73 @@ static struct dvb_usb_device_properties cinergyt2_properties;
46290
46291 static int cinergyt2_streaming_ctrl(struct dvb_usb_adapter *adap, int enable)
46292 {
46293- char buf[] = { CINERGYT2_EP1_CONTROL_STREAM_TRANSFER, enable ? 1 : 0 };
46294- char result[64];
46295- return dvb_usb_generic_rw(adap->dev, buf, sizeof(buf), result,
46296- sizeof(result), 0);
46297+ char *buf;
46298+ char *result;
46299+ int retval;
46300+
46301+ buf = kmalloc(2, GFP_KERNEL);
46302+ if (buf == NULL)
46303+ return -ENOMEM;
46304+ result = kmalloc(64, GFP_KERNEL);
46305+ if (result == NULL) {
46306+ kfree(buf);
46307+ return -ENOMEM;
46308+ }
46309+
46310+ buf[0] = CINERGYT2_EP1_CONTROL_STREAM_TRANSFER;
46311+ buf[1] = enable ? 1 : 0;
46312+
46313+ retval = dvb_usb_generic_rw(adap->dev, buf, 2, result, 64, 0);
46314+
46315+ kfree(buf);
46316+ kfree(result);
46317+ return retval;
46318 }
46319
46320 static int cinergyt2_power_ctrl(struct dvb_usb_device *d, int enable)
46321 {
46322- char buf[] = { CINERGYT2_EP1_SLEEP_MODE, enable ? 0 : 1 };
46323- char state[3];
46324- return dvb_usb_generic_rw(d, buf, sizeof(buf), state, sizeof(state), 0);
46325+ char *buf;
46326+ char *state;
46327+ int retval;
46328+
46329+ buf = kmalloc(2, GFP_KERNEL);
46330+ if (buf == NULL)
46331+ return -ENOMEM;
46332+ state = kmalloc(3, GFP_KERNEL);
46333+ if (state == NULL) {
46334+ kfree(buf);
46335+ return -ENOMEM;
46336+ }
46337+
46338+ buf[0] = CINERGYT2_EP1_SLEEP_MODE;
46339+ buf[1] = enable ? 1 : 0;
46340+
46341+ retval = dvb_usb_generic_rw(d, buf, 2, state, 3, 0);
46342+
46343+ kfree(buf);
46344+ kfree(state);
46345+ return retval;
46346 }
46347
46348 static int cinergyt2_frontend_attach(struct dvb_usb_adapter *adap)
46349 {
46350- char query[] = { CINERGYT2_EP1_GET_FIRMWARE_VERSION };
46351- char state[3];
46352+ char *query;
46353+ char *state;
46354 int ret;
46355+ query = kmalloc(1, GFP_KERNEL);
46356+ if (query == NULL)
46357+ return -ENOMEM;
46358+ state = kmalloc(3, GFP_KERNEL);
46359+ if (state == NULL) {
46360+ kfree(query);
46361+ return -ENOMEM;
46362+ }
46363+
46364+ query[0] = CINERGYT2_EP1_GET_FIRMWARE_VERSION;
46365
46366 adap->fe_adap[0].fe = cinergyt2_fe_attach(adap->dev);
46367
46368- ret = dvb_usb_generic_rw(adap->dev, query, sizeof(query), state,
46369- sizeof(state), 0);
46370+ ret = dvb_usb_generic_rw(adap->dev, query, 1, state, 3, 0);
46371 if (ret < 0) {
46372 deb_rc("cinergyt2_power_ctrl() Failed to retrieve sleep "
46373 "state info\n");
46374@@ -80,7 +124,8 @@ static int cinergyt2_frontend_attach(struct dvb_usb_adapter *adap)
46375
46376 /* Copy this pointer as we are gonna need it in the release phase */
46377 cinergyt2_usb_device = adap->dev;
46378-
46379+ kfree(query);
46380+ kfree(state);
46381 return 0;
46382 }
46383
46384@@ -141,12 +186,23 @@ static int repeatable_keys[] = {
46385 static int cinergyt2_rc_query(struct dvb_usb_device *d, u32 *event, int *state)
46386 {
46387 struct cinergyt2_state *st = d->priv;
46388- u8 key[5] = {0, 0, 0, 0, 0}, cmd = CINERGYT2_EP1_GET_RC_EVENTS;
46389+ u8 *key, *cmd;
46390 int i;
46391
46392+ cmd = kmalloc(1, GFP_KERNEL);
46393+ if (cmd == NULL)
46394+ return -EINVAL;
46395+ key = kzalloc(5, GFP_KERNEL);
46396+ if (key == NULL) {
46397+ kfree(cmd);
46398+ return -EINVAL;
46399+ }
46400+
46401+ cmd[0] = CINERGYT2_EP1_GET_RC_EVENTS;
46402+
46403 *state = REMOTE_NO_KEY_PRESSED;
46404
46405- dvb_usb_generic_rw(d, &cmd, 1, key, sizeof(key), 0);
46406+ dvb_usb_generic_rw(d, cmd, 1, key, 5, 0);
46407 if (key[4] == 0xff) {
46408 /* key repeat */
46409 st->rc_counter++;
46410@@ -157,12 +213,12 @@ static int cinergyt2_rc_query(struct dvb_usb_device *d, u32 *event, int *state)
46411 *event = d->last_event;
46412 deb_rc("repeat key, event %x\n",
46413 *event);
46414- return 0;
46415+ goto out;
46416 }
46417 }
46418 deb_rc("repeated key (non repeatable)\n");
46419 }
46420- return 0;
46421+ goto out;
46422 }
46423
46424 /* hack to pass checksum on the custom field */
46425@@ -174,6 +230,9 @@ static int cinergyt2_rc_query(struct dvb_usb_device *d, u32 *event, int *state)
46426
46427 deb_rc("key: %*ph\n", 5, key);
46428 }
46429+out:
46430+ kfree(cmd);
46431+ kfree(key);
46432 return 0;
46433 }
46434
46435diff --git a/drivers/media/usb/dvb-usb/cinergyT2-fe.c b/drivers/media/usb/dvb-usb/cinergyT2-fe.c
46436index c890fe4..f9b2ae6 100644
46437--- a/drivers/media/usb/dvb-usb/cinergyT2-fe.c
46438+++ b/drivers/media/usb/dvb-usb/cinergyT2-fe.c
46439@@ -145,103 +145,176 @@ static int cinergyt2_fe_read_status(struct dvb_frontend *fe,
46440 fe_status_t *status)
46441 {
46442 struct cinergyt2_fe_state *state = fe->demodulator_priv;
46443- struct dvbt_get_status_msg result;
46444- u8 cmd[] = { CINERGYT2_EP1_GET_TUNER_STATUS };
46445+ struct dvbt_get_status_msg *result;
46446+ u8 *cmd;
46447 int ret;
46448
46449- ret = dvb_usb_generic_rw(state->d, cmd, sizeof(cmd), (u8 *)&result,
46450- sizeof(result), 0);
46451+ cmd = kmalloc(1, GFP_KERNEL);
46452+ if (cmd == NULL)
46453+ return -ENOMEM;
46454+ result = kmalloc(sizeof(*result), GFP_KERNEL);
46455+ if (result == NULL) {
46456+ kfree(cmd);
46457+ return -ENOMEM;
46458+ }
46459+
46460+ cmd[0] = CINERGYT2_EP1_GET_TUNER_STATUS;
46461+
46462+ ret = dvb_usb_generic_rw(state->d, cmd, 1, (u8 *)result,
46463+ sizeof(*result), 0);
46464 if (ret < 0)
46465- return ret;
46466+ goto out;
46467
46468 *status = 0;
46469
46470- if (0xffff - le16_to_cpu(result.gain) > 30)
46471+ if (0xffff - le16_to_cpu(result->gain) > 30)
46472 *status |= FE_HAS_SIGNAL;
46473- if (result.lock_bits & (1 << 6))
46474+ if (result->lock_bits & (1 << 6))
46475 *status |= FE_HAS_LOCK;
46476- if (result.lock_bits & (1 << 5))
46477+ if (result->lock_bits & (1 << 5))
46478 *status |= FE_HAS_SYNC;
46479- if (result.lock_bits & (1 << 4))
46480+ if (result->lock_bits & (1 << 4))
46481 *status |= FE_HAS_CARRIER;
46482- if (result.lock_bits & (1 << 1))
46483+ if (result->lock_bits & (1 << 1))
46484 *status |= FE_HAS_VITERBI;
46485
46486 if ((*status & (FE_HAS_CARRIER | FE_HAS_VITERBI | FE_HAS_SYNC)) !=
46487 (FE_HAS_CARRIER | FE_HAS_VITERBI | FE_HAS_SYNC))
46488 *status &= ~FE_HAS_LOCK;
46489
46490- return 0;
46491+out:
46492+ kfree(cmd);
46493+ kfree(result);
46494+ return ret;
46495 }
46496
46497 static int cinergyt2_fe_read_ber(struct dvb_frontend *fe, u32 *ber)
46498 {
46499 struct cinergyt2_fe_state *state = fe->demodulator_priv;
46500- struct dvbt_get_status_msg status;
46501- char cmd[] = { CINERGYT2_EP1_GET_TUNER_STATUS };
46502+ struct dvbt_get_status_msg *status;
46503+ char *cmd;
46504 int ret;
46505
46506- ret = dvb_usb_generic_rw(state->d, cmd, sizeof(cmd), (char *)&status,
46507- sizeof(status), 0);
46508+ cmd = kmalloc(1, GFP_KERNEL);
46509+ if (cmd == NULL)
46510+ return -ENOMEM;
46511+ status = kmalloc(sizeof(*status), GFP_KERNEL);
46512+ if (status == NULL) {
46513+ kfree(cmd);
46514+ return -ENOMEM;
46515+ }
46516+
46517+ cmd[0] = CINERGYT2_EP1_GET_TUNER_STATUS;
46518+
46519+ ret = dvb_usb_generic_rw(state->d, cmd, 1, (char *)status,
46520+ sizeof(*status), 0);
46521 if (ret < 0)
46522- return ret;
46523+ goto out;
46524
46525- *ber = le32_to_cpu(status.viterbi_error_rate);
46526+ *ber = le32_to_cpu(status->viterbi_error_rate);
46527+out:
46528+ kfree(cmd);
46529+ kfree(status);
46530 return 0;
46531 }
46532
46533 static int cinergyt2_fe_read_unc_blocks(struct dvb_frontend *fe, u32 *unc)
46534 {
46535 struct cinergyt2_fe_state *state = fe->demodulator_priv;
46536- struct dvbt_get_status_msg status;
46537- u8 cmd[] = { CINERGYT2_EP1_GET_TUNER_STATUS };
46538+ struct dvbt_get_status_msg *status;
46539+ u8 *cmd;
46540 int ret;
46541
46542- ret = dvb_usb_generic_rw(state->d, cmd, sizeof(cmd), (u8 *)&status,
46543- sizeof(status), 0);
46544+ cmd = kmalloc(1, GFP_KERNEL);
46545+ if (cmd == NULL)
46546+ return -ENOMEM;
46547+ status = kmalloc(sizeof(*status), GFP_KERNEL);
46548+ if (status == NULL) {
46549+ kfree(cmd);
46550+ return -ENOMEM;
46551+ }
46552+
46553+ cmd[0] = CINERGYT2_EP1_GET_TUNER_STATUS;
46554+
46555+ ret = dvb_usb_generic_rw(state->d, cmd, 1, (u8 *)status,
46556+ sizeof(*status), 0);
46557 if (ret < 0) {
46558 err("cinergyt2_fe_read_unc_blocks() Failed! (Error=%d)\n",
46559 ret);
46560- return ret;
46561+ goto out;
46562 }
46563- *unc = le32_to_cpu(status.uncorrected_block_count);
46564- return 0;
46565+ *unc = le32_to_cpu(status->uncorrected_block_count);
46566+
46567+out:
46568+ kfree(cmd);
46569+ kfree(status);
46570+ return ret;
46571 }
46572
46573 static int cinergyt2_fe_read_signal_strength(struct dvb_frontend *fe,
46574 u16 *strength)
46575 {
46576 struct cinergyt2_fe_state *state = fe->demodulator_priv;
46577- struct dvbt_get_status_msg status;
46578- char cmd[] = { CINERGYT2_EP1_GET_TUNER_STATUS };
46579+ struct dvbt_get_status_msg *status;
46580+ char *cmd;
46581 int ret;
46582
46583- ret = dvb_usb_generic_rw(state->d, cmd, sizeof(cmd), (char *)&status,
46584- sizeof(status), 0);
46585+ cmd = kmalloc(1, GFP_KERNEL);
46586+ if (cmd == NULL)
46587+ return -ENOMEM;
46588+ status = kmalloc(sizeof(*status), GFP_KERNEL);
46589+ if (status == NULL) {
46590+ kfree(cmd);
46591+ return -ENOMEM;
46592+ }
46593+
46594+ cmd[0] = CINERGYT2_EP1_GET_TUNER_STATUS;
46595+
46596+ ret = dvb_usb_generic_rw(state->d, cmd, 1, (char *)status,
46597+ sizeof(*status), 0);
46598 if (ret < 0) {
46599 err("cinergyt2_fe_read_signal_strength() Failed!"
46600 " (Error=%d)\n", ret);
46601- return ret;
46602+ goto out;
46603 }
46604- *strength = (0xffff - le16_to_cpu(status.gain));
46605+ *strength = (0xffff - le16_to_cpu(status->gain));
46606+
46607+out:
46608+ kfree(cmd);
46609+ kfree(status);
46610 return 0;
46611 }
46612
46613 static int cinergyt2_fe_read_snr(struct dvb_frontend *fe, u16 *snr)
46614 {
46615 struct cinergyt2_fe_state *state = fe->demodulator_priv;
46616- struct dvbt_get_status_msg status;
46617- char cmd[] = { CINERGYT2_EP1_GET_TUNER_STATUS };
46618+ struct dvbt_get_status_msg *status;
46619+ char *cmd;
46620 int ret;
46621
46622- ret = dvb_usb_generic_rw(state->d, cmd, sizeof(cmd), (char *)&status,
46623- sizeof(status), 0);
46624+ cmd = kmalloc(1, GFP_KERNEL);
46625+ if (cmd == NULL)
46626+ return -ENOMEM;
46627+ status = kmalloc(sizeof(*status), GFP_KERNEL);
46628+ if (status == NULL) {
46629+ kfree(cmd);
46630+ return -ENOMEM;
46631+ }
46632+
46633+ cmd[0] = CINERGYT2_EP1_GET_TUNER_STATUS;
46634+
46635+ ret = dvb_usb_generic_rw(state->d, cmd, 1, (char *)status,
46636+ sizeof(*status), 0);
46637 if (ret < 0) {
46638 err("cinergyt2_fe_read_snr() Failed! (Error=%d)\n", ret);
46639- return ret;
46640+ goto out;
46641 }
46642- *snr = (status.snr << 8) | status.snr;
46643- return 0;
46644+ *snr = (status->snr << 8) | status->snr;
46645+
46646+out:
46647+ kfree(cmd);
46648+ kfree(status);
46649+ return ret;
46650 }
46651
46652 static int cinergyt2_fe_init(struct dvb_frontend *fe)
46653@@ -266,35 +339,46 @@ static int cinergyt2_fe_set_frontend(struct dvb_frontend *fe)
46654 {
46655 struct dtv_frontend_properties *fep = &fe->dtv_property_cache;
46656 struct cinergyt2_fe_state *state = fe->demodulator_priv;
46657- struct dvbt_set_parameters_msg param;
46658- char result[2];
46659+ struct dvbt_set_parameters_msg *param;
46660+ char *result;
46661 int err;
46662
46663- param.cmd = CINERGYT2_EP1_SET_TUNER_PARAMETERS;
46664- param.tps = cpu_to_le16(compute_tps(fep));
46665- param.freq = cpu_to_le32(fep->frequency / 1000);
46666- param.flags = 0;
46667+ result = kmalloc(2, GFP_KERNEL);
46668+ if (result == NULL)
46669+ return -ENOMEM;
46670+ param = kmalloc(sizeof(*param), GFP_KERNEL);
46671+ if (param == NULL) {
46672+ kfree(result);
46673+ return -ENOMEM;
46674+ }
46675+
46676+ param->cmd = CINERGYT2_EP1_SET_TUNER_PARAMETERS;
46677+ param->tps = cpu_to_le16(compute_tps(fep));
46678+ param->freq = cpu_to_le32(fep->frequency / 1000);
46679+ param->flags = 0;
46680
46681 switch (fep->bandwidth_hz) {
46682 default:
46683 case 8000000:
46684- param.bandwidth = 8;
46685+ param->bandwidth = 8;
46686 break;
46687 case 7000000:
46688- param.bandwidth = 7;
46689+ param->bandwidth = 7;
46690 break;
46691 case 6000000:
46692- param.bandwidth = 6;
46693+ param->bandwidth = 6;
46694 break;
46695 }
46696
46697 err = dvb_usb_generic_rw(state->d,
46698- (char *)&param, sizeof(param),
46699- result, sizeof(result), 0);
46700+ (char *)param, sizeof(*param),
46701+ result, 2, 0);
46702 if (err < 0)
46703 err("cinergyt2_fe_set_frontend() Failed! err=%d\n", err);
46704
46705- return (err < 0) ? err : 0;
46706+ kfree(result);
46707+ kfree(param);
46708+ return err;
46709 }
46710
46711 static void cinergyt2_fe_release(struct dvb_frontend *fe)
46712diff --git a/drivers/media/usb/dvb-usb/dvb-usb-firmware.c b/drivers/media/usb/dvb-usb/dvb-usb-firmware.c
46713index 733a7ff..f8b52e3 100644
46714--- a/drivers/media/usb/dvb-usb/dvb-usb-firmware.c
46715+++ b/drivers/media/usb/dvb-usb/dvb-usb-firmware.c
46716@@ -35,42 +35,57 @@ static int usb_cypress_writemem(struct usb_device *udev,u16 addr,u8 *data, u8 le
46717
46718 int usb_cypress_load_firmware(struct usb_device *udev, const struct firmware *fw, int type)
46719 {
46720- struct hexline hx;
46721- u8 reset;
46722+ struct hexline *hx;
46723+ u8 *reset;
46724 int ret,pos=0;
46725
46726+ reset = kmalloc(1, GFP_KERNEL);
46727+ if (reset == NULL)
46728+ return -ENOMEM;
46729+
46730+ hx = kmalloc(sizeof(struct hexline), GFP_KERNEL);
46731+ if (hx == NULL) {
46732+ kfree(reset);
46733+ return -ENOMEM;
46734+ }
46735+
46736 /* stop the CPU */
46737- reset = 1;
46738- if ((ret = usb_cypress_writemem(udev,cypress[type].cpu_cs_register,&reset,1)) != 1)
46739+ reset[0] = 1;
46740+ if ((ret = usb_cypress_writemem(udev,cypress[type].cpu_cs_register,reset,1)) != 1)
46741 err("could not stop the USB controller CPU.");
46742
46743- while ((ret = dvb_usb_get_hexline(fw,&hx,&pos)) > 0) {
46744- deb_fw("writing to address 0x%04x (buffer: 0x%02x %02x)\n",hx.addr,hx.len,hx.chk);
46745- ret = usb_cypress_writemem(udev,hx.addr,hx.data,hx.len);
46746+ while ((ret = dvb_usb_get_hexline(fw,hx,&pos)) > 0) {
46747+ deb_fw("writing to address 0x%04x (buffer: 0x%02x %02x)\n",hx->addr,hx->len,hx->chk);
46748+ ret = usb_cypress_writemem(udev,hx->addr,hx->data,hx->len);
46749
46750- if (ret != hx.len) {
46751+ if (ret != hx->len) {
46752 err("error while transferring firmware "
46753 "(transferred size: %d, block size: %d)",
46754- ret,hx.len);
46755+ ret,hx->len);
46756 ret = -EINVAL;
46757 break;
46758 }
46759 }
46760 if (ret < 0) {
46761 err("firmware download failed at %d with %d",pos,ret);
46762+ kfree(reset);
46763+ kfree(hx);
46764 return ret;
46765 }
46766
46767 if (ret == 0) {
46768 /* restart the CPU */
46769- reset = 0;
46770- if (ret || usb_cypress_writemem(udev,cypress[type].cpu_cs_register,&reset,1) != 1) {
46771+ reset[0] = 0;
46772+ if (ret || usb_cypress_writemem(udev,cypress[type].cpu_cs_register,reset,1) != 1) {
46773 err("could not restart the USB controller CPU.");
46774 ret = -EINVAL;
46775 }
46776 } else
46777 ret = -EIO;
46778
46779+ kfree(reset);
46780+ kfree(hx);
46781+
46782 return ret;
46783 }
46784 EXPORT_SYMBOL(usb_cypress_load_firmware);
46785diff --git a/drivers/media/usb/dvb-usb/dw2102.c b/drivers/media/usb/dvb-usb/dw2102.c
46786index 1a3df10..57997a5 100644
46787--- a/drivers/media/usb/dvb-usb/dw2102.c
46788+++ b/drivers/media/usb/dvb-usb/dw2102.c
46789@@ -118,7 +118,7 @@ struct su3000_state {
46790
46791 struct s6x0_state {
46792 int (*old_set_voltage)(struct dvb_frontend *f, fe_sec_voltage_t v);
46793-};
46794+} __no_const;
46795
46796 /* debug */
46797 static int dvb_usb_dw2102_debug;
46798diff --git a/drivers/media/usb/dvb-usb/technisat-usb2.c b/drivers/media/usb/dvb-usb/technisat-usb2.c
46799index 5801ae7..83f71fa 100644
46800--- a/drivers/media/usb/dvb-usb/technisat-usb2.c
46801+++ b/drivers/media/usb/dvb-usb/technisat-usb2.c
46802@@ -87,8 +87,11 @@ struct technisat_usb2_state {
46803 static int technisat_usb2_i2c_access(struct usb_device *udev,
46804 u8 device_addr, u8 *tx, u8 txlen, u8 *rx, u8 rxlen)
46805 {
46806- u8 b[64];
46807- int ret, actual_length;
46808+ u8 *b = kmalloc(64, GFP_KERNEL);
46809+ int ret, actual_length, error = 0;
46810+
46811+ if (b == NULL)
46812+ return -ENOMEM;
46813
46814 deb_i2c("i2c-access: %02x, tx: ", device_addr);
46815 debug_dump(tx, txlen, deb_i2c);
46816@@ -121,7 +124,8 @@ static int technisat_usb2_i2c_access(struct usb_device *udev,
46817
46818 if (ret < 0) {
46819 err("i2c-error: out failed %02x = %d", device_addr, ret);
46820- return -ENODEV;
46821+ error = -ENODEV;
46822+ goto out;
46823 }
46824
46825 ret = usb_bulk_msg(udev,
46826@@ -129,7 +133,8 @@ static int technisat_usb2_i2c_access(struct usb_device *udev,
46827 b, 64, &actual_length, 1000);
46828 if (ret < 0) {
46829 err("i2c-error: in failed %02x = %d", device_addr, ret);
46830- return -ENODEV;
46831+ error = -ENODEV;
46832+ goto out;
46833 }
46834
46835 if (b[0] != I2C_STATUS_OK) {
46836@@ -137,8 +142,10 @@ static int technisat_usb2_i2c_access(struct usb_device *udev,
46837 /* handle tuner-i2c-nak */
46838 if (!(b[0] == I2C_STATUS_NAK &&
46839 device_addr == 0x60
46840- /* && device_is_technisat_usb2 */))
46841- return -ENODEV;
46842+ /* && device_is_technisat_usb2 */)) {
46843+ error = -ENODEV;
46844+ goto out;
46845+ }
46846 }
46847
46848 deb_i2c("status: %d, ", b[0]);
46849@@ -152,7 +159,9 @@ static int technisat_usb2_i2c_access(struct usb_device *udev,
46850
46851 deb_i2c("\n");
46852
46853- return 0;
46854+out:
46855+ kfree(b);
46856+ return error;
46857 }
46858
46859 static int technisat_usb2_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msg,
46860@@ -224,14 +233,16 @@ static int technisat_usb2_set_led(struct dvb_usb_device *d, int red, enum techni
46861 {
46862 int ret;
46863
46864- u8 led[8] = {
46865- red ? SET_RED_LED_VENDOR_REQUEST : SET_GREEN_LED_VENDOR_REQUEST,
46866- 0
46867- };
46868+ u8 *led = kzalloc(8, GFP_KERNEL);
46869+
46870+ if (led == NULL)
46871+ return -ENOMEM;
46872
46873 if (disable_led_control && state != TECH_LED_OFF)
46874 return 0;
46875
46876+ led[0] = red ? SET_RED_LED_VENDOR_REQUEST : SET_GREEN_LED_VENDOR_REQUEST;
46877+
46878 switch (state) {
46879 case TECH_LED_ON:
46880 led[1] = 0x82;
46881@@ -263,16 +274,22 @@ static int technisat_usb2_set_led(struct dvb_usb_device *d, int red, enum techni
46882 red ? SET_RED_LED_VENDOR_REQUEST : SET_GREEN_LED_VENDOR_REQUEST,
46883 USB_TYPE_VENDOR | USB_DIR_OUT,
46884 0, 0,
46885- led, sizeof(led), 500);
46886+ led, 8, 500);
46887
46888 mutex_unlock(&d->i2c_mutex);
46889+
46890+ kfree(led);
46891+
46892 return ret;
46893 }
46894
46895 static int technisat_usb2_set_led_timer(struct dvb_usb_device *d, u8 red, u8 green)
46896 {
46897 int ret;
46898- u8 b = 0;
46899+ u8 *b = kzalloc(1, GFP_KERNEL);
46900+
46901+ if (b == NULL)
46902+ return -ENOMEM;
46903
46904 if (mutex_lock_interruptible(&d->i2c_mutex) < 0)
46905 return -EAGAIN;
46906@@ -281,10 +298,12 @@ static int technisat_usb2_set_led_timer(struct dvb_usb_device *d, u8 red, u8 gre
46907 SET_LED_TIMER_DIVIDER_VENDOR_REQUEST,
46908 USB_TYPE_VENDOR | USB_DIR_OUT,
46909 (red << 8) | green, 0,
46910- &b, 1, 500);
46911+ b, 1, 500);
46912
46913 mutex_unlock(&d->i2c_mutex);
46914
46915+ kfree(b);
46916+
46917 return ret;
46918 }
46919
46920@@ -328,7 +347,7 @@ static int technisat_usb2_identify_state(struct usb_device *udev,
46921 struct dvb_usb_device_description **desc, int *cold)
46922 {
46923 int ret;
46924- u8 version[3];
46925+ u8 *version = kmalloc(3, GFP_KERNEL);
46926
46927 /* first select the interface */
46928 if (usb_set_interface(udev, 0, 1) != 0)
46929@@ -338,11 +357,14 @@ static int technisat_usb2_identify_state(struct usb_device *udev,
46930
46931 *cold = 0; /* by default do not download a firmware - just in case something is wrong */
46932
46933+ if (version == NULL)
46934+ return 0;
46935+
46936 ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
46937 GET_VERSION_INFO_VENDOR_REQUEST,
46938 USB_TYPE_VENDOR | USB_DIR_IN,
46939 0, 0,
46940- version, sizeof(version), 500);
46941+ version, 3, 500);
46942
46943 if (ret < 0)
46944 *cold = 1;
46945@@ -351,6 +373,8 @@ static int technisat_usb2_identify_state(struct usb_device *udev,
46946 *cold = 0;
46947 }
46948
46949+ kfree(version);
46950+
46951 return 0;
46952 }
46953
46954@@ -594,10 +618,15 @@ static int technisat_usb2_frontend_attach(struct dvb_usb_adapter *a)
46955
46956 static int technisat_usb2_get_ir(struct dvb_usb_device *d)
46957 {
46958- u8 buf[62], *b;
46959+ u8 *buf, *b;
46960 int ret;
46961 struct ir_raw_event ev;
46962
46963+ buf = kmalloc(62, GFP_KERNEL);
46964+
46965+ if (buf == NULL)
46966+ return -ENOMEM;
46967+
46968 buf[0] = GET_IR_DATA_VENDOR_REQUEST;
46969 buf[1] = 0x08;
46970 buf[2] = 0x8f;
46971@@ -620,16 +649,20 @@ static int technisat_usb2_get_ir(struct dvb_usb_device *d)
46972 GET_IR_DATA_VENDOR_REQUEST,
46973 USB_TYPE_VENDOR | USB_DIR_IN,
46974 0x8080, 0,
46975- buf, sizeof(buf), 500);
46976+ buf, 62, 500);
46977
46978 unlock:
46979 mutex_unlock(&d->i2c_mutex);
46980
46981- if (ret < 0)
46982+ if (ret < 0) {
46983+ kfree(buf);
46984 return ret;
46985+ }
46986
46987- if (ret == 1)
46988+ if (ret == 1) {
46989+ kfree(buf);
46990 return 0; /* no key pressed */
46991+ }
46992
46993 /* decoding */
46994 b = buf+1;
46995@@ -656,6 +689,8 @@ unlock:
46996
46997 ir_raw_event_handle(d->rc_dev);
46998
46999+ kfree(buf);
47000+
47001 return 1;
47002 }
47003
47004diff --git a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
47005index af63543..0436f20 100644
47006--- a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
47007+++ b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
47008@@ -429,7 +429,7 @@ static int get_v4l2_buffer32(struct v4l2_buffer *kp, struct v4l2_buffer32 __user
47009 * by passing a very big num_planes value */
47010 uplane = compat_alloc_user_space(num_planes *
47011 sizeof(struct v4l2_plane));
47012- kp->m.planes = (__force struct v4l2_plane *)uplane;
47013+ kp->m.planes = (__force_kernel struct v4l2_plane *)uplane;
47014
47015 while (--num_planes >= 0) {
47016 ret = get_v4l2_plane32(uplane, uplane32, kp->memory);
47017@@ -500,7 +500,7 @@ static int put_v4l2_buffer32(struct v4l2_buffer *kp, struct v4l2_buffer32 __user
47018 if (num_planes == 0)
47019 return 0;
47020
47021- uplane = (__force struct v4l2_plane __user *)kp->m.planes;
47022+ uplane = (struct v4l2_plane __force_user *)kp->m.planes;
47023 if (get_user(p, &up->m.planes))
47024 return -EFAULT;
47025 uplane32 = compat_ptr(p);
47026@@ -564,7 +564,7 @@ static int get_v4l2_framebuffer32(struct v4l2_framebuffer *kp, struct v4l2_frame
47027 get_user(kp->flags, &up->flags) ||
47028 copy_from_user(&kp->fmt, &up->fmt, sizeof(up->fmt)))
47029 return -EFAULT;
47030- kp->base = (__force void *)compat_ptr(tmp);
47031+ kp->base = (__force_kernel void *)compat_ptr(tmp);
47032 return 0;
47033 }
47034
47035@@ -669,7 +669,7 @@ static int get_v4l2_ext_controls32(struct v4l2_ext_controls *kp, struct v4l2_ext
47036 n * sizeof(struct v4l2_ext_control32)))
47037 return -EFAULT;
47038 kcontrols = compat_alloc_user_space(n * sizeof(struct v4l2_ext_control));
47039- kp->controls = (__force struct v4l2_ext_control *)kcontrols;
47040+ kp->controls = (__force_kernel struct v4l2_ext_control *)kcontrols;
47041 while (--n >= 0) {
47042 u32 id;
47043
47044@@ -696,7 +696,7 @@ static int put_v4l2_ext_controls32(struct v4l2_ext_controls *kp, struct v4l2_ext
47045 {
47046 struct v4l2_ext_control32 __user *ucontrols;
47047 struct v4l2_ext_control __user *kcontrols =
47048- (__force struct v4l2_ext_control __user *)kp->controls;
47049+ (struct v4l2_ext_control __force_user *)kp->controls;
47050 int n = kp->count;
47051 compat_caddr_t p;
47052
47053@@ -780,7 +780,7 @@ static int get_v4l2_edid32(struct v4l2_edid *kp, struct v4l2_edid32 __user *up)
47054 get_user(tmp, &up->edid) ||
47055 copy_from_user(kp->reserved, up->reserved, sizeof(kp->reserved)))
47056 return -EFAULT;
47057- kp->edid = (__force u8 *)compat_ptr(tmp);
47058+ kp->edid = (__force_kernel u8 *)compat_ptr(tmp);
47059 return 0;
47060 }
47061
47062diff --git a/drivers/media/v4l2-core/v4l2-device.c b/drivers/media/v4l2-core/v4l2-device.c
47063index 015f92a..59e311e 100644
47064--- a/drivers/media/v4l2-core/v4l2-device.c
47065+++ b/drivers/media/v4l2-core/v4l2-device.c
47066@@ -75,9 +75,9 @@ int v4l2_device_put(struct v4l2_device *v4l2_dev)
47067 EXPORT_SYMBOL_GPL(v4l2_device_put);
47068
47069 int v4l2_device_set_name(struct v4l2_device *v4l2_dev, const char *basename,
47070- atomic_t *instance)
47071+ atomic_unchecked_t *instance)
47072 {
47073- int num = atomic_inc_return(instance) - 1;
47074+ int num = atomic_inc_return_unchecked(instance) - 1;
47075 int len = strlen(basename);
47076
47077 if (basename[len - 1] >= '0' && basename[len - 1] <= '9')
47078diff --git a/drivers/media/v4l2-core/v4l2-ioctl.c b/drivers/media/v4l2-core/v4l2-ioctl.c
47079index b084072..36706d7 100644
47080--- a/drivers/media/v4l2-core/v4l2-ioctl.c
47081+++ b/drivers/media/v4l2-core/v4l2-ioctl.c
47082@@ -2151,7 +2151,8 @@ struct v4l2_ioctl_info {
47083 struct file *file, void *fh, void *p);
47084 } u;
47085 void (*debug)(const void *arg, bool write_only);
47086-};
47087+} __do_const;
47088+typedef struct v4l2_ioctl_info __no_const v4l2_ioctl_info_no_const;
47089
47090 /* This control needs a priority check */
47091 #define INFO_FL_PRIO (1 << 0)
47092@@ -2335,7 +2336,7 @@ static long __video_do_ioctl(struct file *file,
47093 struct video_device *vfd = video_devdata(file);
47094 const struct v4l2_ioctl_ops *ops = vfd->ioctl_ops;
47095 bool write_only = false;
47096- struct v4l2_ioctl_info default_info;
47097+ v4l2_ioctl_info_no_const default_info;
47098 const struct v4l2_ioctl_info *info;
47099 void *fh = file->private_data;
47100 struct v4l2_fh *vfh = NULL;
47101@@ -2426,7 +2427,7 @@ static int check_array_args(unsigned int cmd, void *parg, size_t *array_size,
47102 ret = -EINVAL;
47103 break;
47104 }
47105- *user_ptr = (void __user *)buf->m.planes;
47106+ *user_ptr = (void __force_user *)buf->m.planes;
47107 *kernel_ptr = (void **)&buf->m.planes;
47108 *array_size = sizeof(struct v4l2_plane) * buf->length;
47109 ret = 1;
47110@@ -2443,7 +2444,7 @@ static int check_array_args(unsigned int cmd, void *parg, size_t *array_size,
47111 ret = -EINVAL;
47112 break;
47113 }
47114- *user_ptr = (void __user *)edid->edid;
47115+ *user_ptr = (void __force_user *)edid->edid;
47116 *kernel_ptr = (void **)&edid->edid;
47117 *array_size = edid->blocks * 128;
47118 ret = 1;
47119@@ -2461,7 +2462,7 @@ static int check_array_args(unsigned int cmd, void *parg, size_t *array_size,
47120 ret = -EINVAL;
47121 break;
47122 }
47123- *user_ptr = (void __user *)ctrls->controls;
47124+ *user_ptr = (void __force_user *)ctrls->controls;
47125 *kernel_ptr = (void **)&ctrls->controls;
47126 *array_size = sizeof(struct v4l2_ext_control)
47127 * ctrls->count;
47128@@ -2562,7 +2563,7 @@ video_usercopy(struct file *file, unsigned int cmd, unsigned long arg,
47129 }
47130
47131 if (has_array_args) {
47132- *kernel_ptr = (void __force *)user_ptr;
47133+ *kernel_ptr = (void __force_kernel *)user_ptr;
47134 if (copy_to_user(user_ptr, mbuf, array_size))
47135 err = -EFAULT;
47136 goto out_array_args;
47137diff --git a/drivers/memory/omap-gpmc.c b/drivers/memory/omap-gpmc.c
47138index 24696f5..3637780 100644
47139--- a/drivers/memory/omap-gpmc.c
47140+++ b/drivers/memory/omap-gpmc.c
47141@@ -211,7 +211,6 @@ struct omap3_gpmc_regs {
47142 };
47143
47144 static struct gpmc_client_irq gpmc_client_irq[GPMC_NR_IRQ];
47145-static struct irq_chip gpmc_irq_chip;
47146 static int gpmc_irq_start;
47147
47148 static struct resource gpmc_mem_root;
47149@@ -939,6 +938,17 @@ static void gpmc_irq_noop(struct irq_data *data) { }
47150
47151 static unsigned int gpmc_irq_noop_ret(struct irq_data *data) { return 0; }
47152
47153+static struct irq_chip gpmc_irq_chip = {
47154+ .name = "gpmc",
47155+ .irq_startup = gpmc_irq_noop_ret,
47156+ .irq_enable = gpmc_irq_enable,
47157+ .irq_disable = gpmc_irq_disable,
47158+ .irq_shutdown = gpmc_irq_noop,
47159+ .irq_ack = gpmc_irq_noop,
47160+ .irq_mask = gpmc_irq_noop,
47161+ .irq_unmask = gpmc_irq_noop,
47162+};
47163+
47164 static int gpmc_setup_irq(void)
47165 {
47166 int i;
47167@@ -953,15 +963,6 @@ static int gpmc_setup_irq(void)
47168 return gpmc_irq_start;
47169 }
47170
47171- gpmc_irq_chip.name = "gpmc";
47172- gpmc_irq_chip.irq_startup = gpmc_irq_noop_ret;
47173- gpmc_irq_chip.irq_enable = gpmc_irq_enable;
47174- gpmc_irq_chip.irq_disable = gpmc_irq_disable;
47175- gpmc_irq_chip.irq_shutdown = gpmc_irq_noop;
47176- gpmc_irq_chip.irq_ack = gpmc_irq_noop;
47177- gpmc_irq_chip.irq_mask = gpmc_irq_noop;
47178- gpmc_irq_chip.irq_unmask = gpmc_irq_noop;
47179-
47180 gpmc_client_irq[0].bitmask = GPMC_IRQ_FIFOEVENTENABLE;
47181 gpmc_client_irq[1].bitmask = GPMC_IRQ_COUNT_EVENT;
47182
47183diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c
47184index 187f836..679544b 100644
47185--- a/drivers/message/fusion/mptbase.c
47186+++ b/drivers/message/fusion/mptbase.c
47187@@ -6746,8 +6746,13 @@ static int mpt_iocinfo_proc_show(struct seq_file *m, void *v)
47188 seq_printf(m, " MaxChainDepth = 0x%02x frames\n", ioc->facts.MaxChainDepth);
47189 seq_printf(m, " MinBlockSize = 0x%02x bytes\n", 4*ioc->facts.BlockSize);
47190
47191+#ifdef CONFIG_GRKERNSEC_HIDESYM
47192+ seq_printf(m, " RequestFrames @ 0x%p (Dma @ 0x%p)\n", NULL, NULL);
47193+#else
47194 seq_printf(m, " RequestFrames @ 0x%p (Dma @ 0x%p)\n",
47195 (void *)ioc->req_frames, (void *)(ulong)ioc->req_frames_dma);
47196+#endif
47197+
47198 /*
47199 * Rounding UP to nearest 4-kB boundary here...
47200 */
47201@@ -6760,7 +6765,11 @@ static int mpt_iocinfo_proc_show(struct seq_file *m, void *v)
47202 ioc->facts.GlobalCredits);
47203
47204 seq_printf(m, " Frames @ 0x%p (Dma @ 0x%p)\n",
47205+#ifdef CONFIG_GRKERNSEC_HIDESYM
47206+ NULL, NULL);
47207+#else
47208 (void *)ioc->alloc, (void *)(ulong)ioc->alloc_dma);
47209+#endif
47210 sz = (ioc->reply_sz * ioc->reply_depth) + 128;
47211 seq_printf(m, " {CurRepSz=%d} x {CurRepDepth=%d} = %d bytes ^= 0x%x\n",
47212 ioc->reply_sz, ioc->reply_depth, ioc->reply_sz*ioc->reply_depth, sz);
47213diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c
47214index 5bdaae1..eced16f 100644
47215--- a/drivers/message/fusion/mptsas.c
47216+++ b/drivers/message/fusion/mptsas.c
47217@@ -446,6 +446,23 @@ mptsas_is_end_device(struct mptsas_devinfo * attached)
47218 return 0;
47219 }
47220
47221+static inline void
47222+mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
47223+{
47224+ if (phy_info->port_details) {
47225+ phy_info->port_details->rphy = rphy;
47226+ dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
47227+ ioc->name, rphy));
47228+ }
47229+
47230+ if (rphy) {
47231+ dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
47232+ &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
47233+ dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
47234+ ioc->name, rphy, rphy->dev.release));
47235+ }
47236+}
47237+
47238 /* no mutex */
47239 static void
47240 mptsas_port_delete(MPT_ADAPTER *ioc, struct mptsas_portinfo_details * port_details)
47241@@ -484,23 +501,6 @@ mptsas_get_rphy(struct mptsas_phyinfo *phy_info)
47242 return NULL;
47243 }
47244
47245-static inline void
47246-mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
47247-{
47248- if (phy_info->port_details) {
47249- phy_info->port_details->rphy = rphy;
47250- dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
47251- ioc->name, rphy));
47252- }
47253-
47254- if (rphy) {
47255- dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
47256- &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
47257- dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
47258- ioc->name, rphy, rphy->dev.release));
47259- }
47260-}
47261-
47262 static inline struct sas_port *
47263 mptsas_get_port(struct mptsas_phyinfo *phy_info)
47264 {
47265diff --git a/drivers/mfd/ab8500-debugfs.c b/drivers/mfd/ab8500-debugfs.c
47266index 9a8e185..27ff17d 100644
47267--- a/drivers/mfd/ab8500-debugfs.c
47268+++ b/drivers/mfd/ab8500-debugfs.c
47269@@ -100,7 +100,7 @@ static int irq_last;
47270 static u32 *irq_count;
47271 static int num_irqs;
47272
47273-static struct device_attribute **dev_attr;
47274+static device_attribute_no_const **dev_attr;
47275 static char **event_name;
47276
47277 static u8 avg_sample = SAMPLE_16;
47278diff --git a/drivers/mfd/kempld-core.c b/drivers/mfd/kempld-core.c
47279index 5615522..1eb6f3dc 100644
47280--- a/drivers/mfd/kempld-core.c
47281+++ b/drivers/mfd/kempld-core.c
47282@@ -499,7 +499,7 @@ static struct platform_driver kempld_driver = {
47283 .remove = kempld_remove,
47284 };
47285
47286-static struct dmi_system_id kempld_dmi_table[] __initdata = {
47287+static const struct dmi_system_id kempld_dmi_table[] __initconst = {
47288 {
47289 .ident = "BHL6",
47290 .matches = {
47291diff --git a/drivers/mfd/max8925-i2c.c b/drivers/mfd/max8925-i2c.c
47292index c880c89..45a7c68 100644
47293--- a/drivers/mfd/max8925-i2c.c
47294+++ b/drivers/mfd/max8925-i2c.c
47295@@ -152,7 +152,7 @@ static int max8925_probe(struct i2c_client *client,
47296 const struct i2c_device_id *id)
47297 {
47298 struct max8925_platform_data *pdata = dev_get_platdata(&client->dev);
47299- static struct max8925_chip *chip;
47300+ struct max8925_chip *chip;
47301 struct device_node *node = client->dev.of_node;
47302
47303 if (node && !pdata) {
47304diff --git a/drivers/mfd/tps65910.c b/drivers/mfd/tps65910.c
47305index 7612d89..70549c2 100644
47306--- a/drivers/mfd/tps65910.c
47307+++ b/drivers/mfd/tps65910.c
47308@@ -230,7 +230,7 @@ static int tps65910_irq_init(struct tps65910 *tps65910, int irq,
47309 struct tps65910_platform_data *pdata)
47310 {
47311 int ret = 0;
47312- static struct regmap_irq_chip *tps6591x_irqs_chip;
47313+ struct regmap_irq_chip *tps6591x_irqs_chip;
47314
47315 if (!irq) {
47316 dev_warn(tps65910->dev, "No interrupt support, no core IRQ\n");
47317diff --git a/drivers/mfd/twl4030-irq.c b/drivers/mfd/twl4030-irq.c
47318index 1b772ef..01e77d33 100644
47319--- a/drivers/mfd/twl4030-irq.c
47320+++ b/drivers/mfd/twl4030-irq.c
47321@@ -34,6 +34,7 @@
47322 #include <linux/of.h>
47323 #include <linux/irqdomain.h>
47324 #include <linux/i2c/twl.h>
47325+#include <asm/pgtable.h>
47326
47327 #include "twl-core.h"
47328
47329@@ -729,10 +730,12 @@ int twl4030_init_irq(struct device *dev, int irq_num)
47330 * Install an irq handler for each of the SIH modules;
47331 * clone dummy irq_chip since PIH can't *do* anything
47332 */
47333- twl4030_irq_chip = dummy_irq_chip;
47334- twl4030_irq_chip.name = "twl4030";
47335+ pax_open_kernel();
47336+ memcpy((void *)&twl4030_irq_chip, &dummy_irq_chip, sizeof twl4030_irq_chip);
47337+ *(const char **)&twl4030_irq_chip.name = "twl4030";
47338
47339- twl4030_sih_irq_chip.irq_ack = dummy_irq_chip.irq_ack;
47340+ *(void **)&twl4030_sih_irq_chip.irq_ack = dummy_irq_chip.irq_ack;
47341+ pax_close_kernel();
47342
47343 for (i = irq_base; i < irq_end; i++) {
47344 irq_set_chip_and_handler(i, &twl4030_irq_chip,
47345diff --git a/drivers/misc/c2port/core.c b/drivers/misc/c2port/core.c
47346index 464419b..64bae8d 100644
47347--- a/drivers/misc/c2port/core.c
47348+++ b/drivers/misc/c2port/core.c
47349@@ -922,7 +922,9 @@ struct c2port_device *c2port_device_register(char *name,
47350 goto error_idr_alloc;
47351 c2dev->id = ret;
47352
47353- bin_attr_flash_data.size = ops->blocks_num * ops->block_size;
47354+ pax_open_kernel();
47355+ *(size_t *)&bin_attr_flash_data.size = ops->blocks_num * ops->block_size;
47356+ pax_close_kernel();
47357
47358 c2dev->dev = device_create(c2port_class, NULL, 0, c2dev,
47359 "c2port%d", c2dev->id);
47360diff --git a/drivers/misc/eeprom/sunxi_sid.c b/drivers/misc/eeprom/sunxi_sid.c
47361index 8385177..2f54635 100644
47362--- a/drivers/misc/eeprom/sunxi_sid.c
47363+++ b/drivers/misc/eeprom/sunxi_sid.c
47364@@ -126,7 +126,9 @@ static int sunxi_sid_probe(struct platform_device *pdev)
47365
47366 platform_set_drvdata(pdev, sid_data);
47367
47368- sid_bin_attr.size = sid_data->keysize;
47369+ pax_open_kernel();
47370+ *(size_t *)&sid_bin_attr.size = sid_data->keysize;
47371+ pax_close_kernel();
47372 if (device_create_bin_file(&pdev->dev, &sid_bin_attr))
47373 return -ENODEV;
47374
47375diff --git a/drivers/misc/kgdbts.c b/drivers/misc/kgdbts.c
47376index 36f5d52..32311c3 100644
47377--- a/drivers/misc/kgdbts.c
47378+++ b/drivers/misc/kgdbts.c
47379@@ -834,7 +834,7 @@ static void run_plant_and_detach_test(int is_early)
47380 char before[BREAK_INSTR_SIZE];
47381 char after[BREAK_INSTR_SIZE];
47382
47383- probe_kernel_read(before, (char *)kgdbts_break_test,
47384+ probe_kernel_read(before, ktla_ktva((char *)kgdbts_break_test),
47385 BREAK_INSTR_SIZE);
47386 init_simple_test();
47387 ts.tst = plant_and_detach_test;
47388@@ -842,7 +842,7 @@ static void run_plant_and_detach_test(int is_early)
47389 /* Activate test with initial breakpoint */
47390 if (!is_early)
47391 kgdb_breakpoint();
47392- probe_kernel_read(after, (char *)kgdbts_break_test,
47393+ probe_kernel_read(after, ktla_ktva((char *)kgdbts_break_test),
47394 BREAK_INSTR_SIZE);
47395 if (memcmp(before, after, BREAK_INSTR_SIZE)) {
47396 printk(KERN_CRIT "kgdbts: ERROR kgdb corrupted memory\n");
47397diff --git a/drivers/misc/lis3lv02d/lis3lv02d.c b/drivers/misc/lis3lv02d/lis3lv02d.c
47398index 3ef4627..8d00486 100644
47399--- a/drivers/misc/lis3lv02d/lis3lv02d.c
47400+++ b/drivers/misc/lis3lv02d/lis3lv02d.c
47401@@ -497,7 +497,7 @@ static irqreturn_t lis302dl_interrupt(int irq, void *data)
47402 * the lid is closed. This leads to interrupts as soon as a little move
47403 * is done.
47404 */
47405- atomic_inc(&lis3->count);
47406+ atomic_inc_unchecked(&lis3->count);
47407
47408 wake_up_interruptible(&lis3->misc_wait);
47409 kill_fasync(&lis3->async_queue, SIGIO, POLL_IN);
47410@@ -583,7 +583,7 @@ static int lis3lv02d_misc_open(struct inode *inode, struct file *file)
47411 if (lis3->pm_dev)
47412 pm_runtime_get_sync(lis3->pm_dev);
47413
47414- atomic_set(&lis3->count, 0);
47415+ atomic_set_unchecked(&lis3->count, 0);
47416 return 0;
47417 }
47418
47419@@ -615,7 +615,7 @@ static ssize_t lis3lv02d_misc_read(struct file *file, char __user *buf,
47420 add_wait_queue(&lis3->misc_wait, &wait);
47421 while (true) {
47422 set_current_state(TASK_INTERRUPTIBLE);
47423- data = atomic_xchg(&lis3->count, 0);
47424+ data = atomic_xchg_unchecked(&lis3->count, 0);
47425 if (data)
47426 break;
47427
47428@@ -656,7 +656,7 @@ static unsigned int lis3lv02d_misc_poll(struct file *file, poll_table *wait)
47429 struct lis3lv02d, miscdev);
47430
47431 poll_wait(file, &lis3->misc_wait, wait);
47432- if (atomic_read(&lis3->count))
47433+ if (atomic_read_unchecked(&lis3->count))
47434 return POLLIN | POLLRDNORM;
47435 return 0;
47436 }
47437diff --git a/drivers/misc/lis3lv02d/lis3lv02d.h b/drivers/misc/lis3lv02d/lis3lv02d.h
47438index c439c82..1f20f57 100644
47439--- a/drivers/misc/lis3lv02d/lis3lv02d.h
47440+++ b/drivers/misc/lis3lv02d/lis3lv02d.h
47441@@ -297,7 +297,7 @@ struct lis3lv02d {
47442 struct input_polled_dev *idev; /* input device */
47443 struct platform_device *pdev; /* platform device */
47444 struct regulator_bulk_data regulators[2];
47445- atomic_t count; /* interrupt count after last read */
47446+ atomic_unchecked_t count; /* interrupt count after last read */
47447 union axis_conversion ac; /* hw -> logical axis */
47448 int mapped_btns[3];
47449
47450diff --git a/drivers/misc/sgi-gru/gruhandles.c b/drivers/misc/sgi-gru/gruhandles.c
47451index 2f30bad..c4c13d0 100644
47452--- a/drivers/misc/sgi-gru/gruhandles.c
47453+++ b/drivers/misc/sgi-gru/gruhandles.c
47454@@ -44,8 +44,8 @@ static void update_mcs_stats(enum mcs_op op, unsigned long clks)
47455 unsigned long nsec;
47456
47457 nsec = CLKS2NSEC(clks);
47458- atomic_long_inc(&mcs_op_statistics[op].count);
47459- atomic_long_add(nsec, &mcs_op_statistics[op].total);
47460+ atomic_long_inc_unchecked(&mcs_op_statistics[op].count);
47461+ atomic_long_add_unchecked(nsec, &mcs_op_statistics[op].total);
47462 if (mcs_op_statistics[op].max < nsec)
47463 mcs_op_statistics[op].max = nsec;
47464 }
47465diff --git a/drivers/misc/sgi-gru/gruprocfs.c b/drivers/misc/sgi-gru/gruprocfs.c
47466index 4f76359..cdfcb2e 100644
47467--- a/drivers/misc/sgi-gru/gruprocfs.c
47468+++ b/drivers/misc/sgi-gru/gruprocfs.c
47469@@ -32,9 +32,9 @@
47470
47471 #define printstat(s, f) printstat_val(s, &gru_stats.f, #f)
47472
47473-static void printstat_val(struct seq_file *s, atomic_long_t *v, char *id)
47474+static void printstat_val(struct seq_file *s, atomic_long_unchecked_t *v, char *id)
47475 {
47476- unsigned long val = atomic_long_read(v);
47477+ unsigned long val = atomic_long_read_unchecked(v);
47478
47479 seq_printf(s, "%16lu %s\n", val, id);
47480 }
47481@@ -134,8 +134,8 @@ static int mcs_statistics_show(struct seq_file *s, void *p)
47482
47483 seq_printf(s, "%-20s%12s%12s%12s\n", "#id", "count", "aver-clks", "max-clks");
47484 for (op = 0; op < mcsop_last; op++) {
47485- count = atomic_long_read(&mcs_op_statistics[op].count);
47486- total = atomic_long_read(&mcs_op_statistics[op].total);
47487+ count = atomic_long_read_unchecked(&mcs_op_statistics[op].count);
47488+ total = atomic_long_read_unchecked(&mcs_op_statistics[op].total);
47489 max = mcs_op_statistics[op].max;
47490 seq_printf(s, "%-20s%12ld%12ld%12ld\n", id[op], count,
47491 count ? total / count : 0, max);
47492diff --git a/drivers/misc/sgi-gru/grutables.h b/drivers/misc/sgi-gru/grutables.h
47493index 5c3ce24..4915ccb 100644
47494--- a/drivers/misc/sgi-gru/grutables.h
47495+++ b/drivers/misc/sgi-gru/grutables.h
47496@@ -167,82 +167,82 @@ extern unsigned int gru_max_gids;
47497 * GRU statistics.
47498 */
47499 struct gru_stats_s {
47500- atomic_long_t vdata_alloc;
47501- atomic_long_t vdata_free;
47502- atomic_long_t gts_alloc;
47503- atomic_long_t gts_free;
47504- atomic_long_t gms_alloc;
47505- atomic_long_t gms_free;
47506- atomic_long_t gts_double_allocate;
47507- atomic_long_t assign_context;
47508- atomic_long_t assign_context_failed;
47509- atomic_long_t free_context;
47510- atomic_long_t load_user_context;
47511- atomic_long_t load_kernel_context;
47512- atomic_long_t lock_kernel_context;
47513- atomic_long_t unlock_kernel_context;
47514- atomic_long_t steal_user_context;
47515- atomic_long_t steal_kernel_context;
47516- atomic_long_t steal_context_failed;
47517- atomic_long_t nopfn;
47518- atomic_long_t asid_new;
47519- atomic_long_t asid_next;
47520- atomic_long_t asid_wrap;
47521- atomic_long_t asid_reuse;
47522- atomic_long_t intr;
47523- atomic_long_t intr_cbr;
47524- atomic_long_t intr_tfh;
47525- atomic_long_t intr_spurious;
47526- atomic_long_t intr_mm_lock_failed;
47527- atomic_long_t call_os;
47528- atomic_long_t call_os_wait_queue;
47529- atomic_long_t user_flush_tlb;
47530- atomic_long_t user_unload_context;
47531- atomic_long_t user_exception;
47532- atomic_long_t set_context_option;
47533- atomic_long_t check_context_retarget_intr;
47534- atomic_long_t check_context_unload;
47535- atomic_long_t tlb_dropin;
47536- atomic_long_t tlb_preload_page;
47537- atomic_long_t tlb_dropin_fail_no_asid;
47538- atomic_long_t tlb_dropin_fail_upm;
47539- atomic_long_t tlb_dropin_fail_invalid;
47540- atomic_long_t tlb_dropin_fail_range_active;
47541- atomic_long_t tlb_dropin_fail_idle;
47542- atomic_long_t tlb_dropin_fail_fmm;
47543- atomic_long_t tlb_dropin_fail_no_exception;
47544- atomic_long_t tfh_stale_on_fault;
47545- atomic_long_t mmu_invalidate_range;
47546- atomic_long_t mmu_invalidate_page;
47547- atomic_long_t flush_tlb;
47548- atomic_long_t flush_tlb_gru;
47549- atomic_long_t flush_tlb_gru_tgh;
47550- atomic_long_t flush_tlb_gru_zero_asid;
47551+ atomic_long_unchecked_t vdata_alloc;
47552+ atomic_long_unchecked_t vdata_free;
47553+ atomic_long_unchecked_t gts_alloc;
47554+ atomic_long_unchecked_t gts_free;
47555+ atomic_long_unchecked_t gms_alloc;
47556+ atomic_long_unchecked_t gms_free;
47557+ atomic_long_unchecked_t gts_double_allocate;
47558+ atomic_long_unchecked_t assign_context;
47559+ atomic_long_unchecked_t assign_context_failed;
47560+ atomic_long_unchecked_t free_context;
47561+ atomic_long_unchecked_t load_user_context;
47562+ atomic_long_unchecked_t load_kernel_context;
47563+ atomic_long_unchecked_t lock_kernel_context;
47564+ atomic_long_unchecked_t unlock_kernel_context;
47565+ atomic_long_unchecked_t steal_user_context;
47566+ atomic_long_unchecked_t steal_kernel_context;
47567+ atomic_long_unchecked_t steal_context_failed;
47568+ atomic_long_unchecked_t nopfn;
47569+ atomic_long_unchecked_t asid_new;
47570+ atomic_long_unchecked_t asid_next;
47571+ atomic_long_unchecked_t asid_wrap;
47572+ atomic_long_unchecked_t asid_reuse;
47573+ atomic_long_unchecked_t intr;
47574+ atomic_long_unchecked_t intr_cbr;
47575+ atomic_long_unchecked_t intr_tfh;
47576+ atomic_long_unchecked_t intr_spurious;
47577+ atomic_long_unchecked_t intr_mm_lock_failed;
47578+ atomic_long_unchecked_t call_os;
47579+ atomic_long_unchecked_t call_os_wait_queue;
47580+ atomic_long_unchecked_t user_flush_tlb;
47581+ atomic_long_unchecked_t user_unload_context;
47582+ atomic_long_unchecked_t user_exception;
47583+ atomic_long_unchecked_t set_context_option;
47584+ atomic_long_unchecked_t check_context_retarget_intr;
47585+ atomic_long_unchecked_t check_context_unload;
47586+ atomic_long_unchecked_t tlb_dropin;
47587+ atomic_long_unchecked_t tlb_preload_page;
47588+ atomic_long_unchecked_t tlb_dropin_fail_no_asid;
47589+ atomic_long_unchecked_t tlb_dropin_fail_upm;
47590+ atomic_long_unchecked_t tlb_dropin_fail_invalid;
47591+ atomic_long_unchecked_t tlb_dropin_fail_range_active;
47592+ atomic_long_unchecked_t tlb_dropin_fail_idle;
47593+ atomic_long_unchecked_t tlb_dropin_fail_fmm;
47594+ atomic_long_unchecked_t tlb_dropin_fail_no_exception;
47595+ atomic_long_unchecked_t tfh_stale_on_fault;
47596+ atomic_long_unchecked_t mmu_invalidate_range;
47597+ atomic_long_unchecked_t mmu_invalidate_page;
47598+ atomic_long_unchecked_t flush_tlb;
47599+ atomic_long_unchecked_t flush_tlb_gru;
47600+ atomic_long_unchecked_t flush_tlb_gru_tgh;
47601+ atomic_long_unchecked_t flush_tlb_gru_zero_asid;
47602
47603- atomic_long_t copy_gpa;
47604- atomic_long_t read_gpa;
47605+ atomic_long_unchecked_t copy_gpa;
47606+ atomic_long_unchecked_t read_gpa;
47607
47608- atomic_long_t mesq_receive;
47609- atomic_long_t mesq_receive_none;
47610- atomic_long_t mesq_send;
47611- atomic_long_t mesq_send_failed;
47612- atomic_long_t mesq_noop;
47613- atomic_long_t mesq_send_unexpected_error;
47614- atomic_long_t mesq_send_lb_overflow;
47615- atomic_long_t mesq_send_qlimit_reached;
47616- atomic_long_t mesq_send_amo_nacked;
47617- atomic_long_t mesq_send_put_nacked;
47618- atomic_long_t mesq_page_overflow;
47619- atomic_long_t mesq_qf_locked;
47620- atomic_long_t mesq_qf_noop_not_full;
47621- atomic_long_t mesq_qf_switch_head_failed;
47622- atomic_long_t mesq_qf_unexpected_error;
47623- atomic_long_t mesq_noop_unexpected_error;
47624- atomic_long_t mesq_noop_lb_overflow;
47625- atomic_long_t mesq_noop_qlimit_reached;
47626- atomic_long_t mesq_noop_amo_nacked;
47627- atomic_long_t mesq_noop_put_nacked;
47628- atomic_long_t mesq_noop_page_overflow;
47629+ atomic_long_unchecked_t mesq_receive;
47630+ atomic_long_unchecked_t mesq_receive_none;
47631+ atomic_long_unchecked_t mesq_send;
47632+ atomic_long_unchecked_t mesq_send_failed;
47633+ atomic_long_unchecked_t mesq_noop;
47634+ atomic_long_unchecked_t mesq_send_unexpected_error;
47635+ atomic_long_unchecked_t mesq_send_lb_overflow;
47636+ atomic_long_unchecked_t mesq_send_qlimit_reached;
47637+ atomic_long_unchecked_t mesq_send_amo_nacked;
47638+ atomic_long_unchecked_t mesq_send_put_nacked;
47639+ atomic_long_unchecked_t mesq_page_overflow;
47640+ atomic_long_unchecked_t mesq_qf_locked;
47641+ atomic_long_unchecked_t mesq_qf_noop_not_full;
47642+ atomic_long_unchecked_t mesq_qf_switch_head_failed;
47643+ atomic_long_unchecked_t mesq_qf_unexpected_error;
47644+ atomic_long_unchecked_t mesq_noop_unexpected_error;
47645+ atomic_long_unchecked_t mesq_noop_lb_overflow;
47646+ atomic_long_unchecked_t mesq_noop_qlimit_reached;
47647+ atomic_long_unchecked_t mesq_noop_amo_nacked;
47648+ atomic_long_unchecked_t mesq_noop_put_nacked;
47649+ atomic_long_unchecked_t mesq_noop_page_overflow;
47650
47651 };
47652
47653@@ -251,8 +251,8 @@ enum mcs_op {cchop_allocate, cchop_start, cchop_interrupt, cchop_interrupt_sync,
47654 tghop_invalidate, mcsop_last};
47655
47656 struct mcs_op_statistic {
47657- atomic_long_t count;
47658- atomic_long_t total;
47659+ atomic_long_unchecked_t count;
47660+ atomic_long_unchecked_t total;
47661 unsigned long max;
47662 };
47663
47664@@ -275,7 +275,7 @@ extern struct mcs_op_statistic mcs_op_statistics[mcsop_last];
47665
47666 #define STAT(id) do { \
47667 if (gru_options & OPT_STATS) \
47668- atomic_long_inc(&gru_stats.id); \
47669+ atomic_long_inc_unchecked(&gru_stats.id); \
47670 } while (0)
47671
47672 #ifdef CONFIG_SGI_GRU_DEBUG
47673diff --git a/drivers/misc/sgi-xp/xp.h b/drivers/misc/sgi-xp/xp.h
47674index c862cd4..0d176fe 100644
47675--- a/drivers/misc/sgi-xp/xp.h
47676+++ b/drivers/misc/sgi-xp/xp.h
47677@@ -288,7 +288,7 @@ struct xpc_interface {
47678 xpc_notify_func, void *);
47679 void (*received) (short, int, void *);
47680 enum xp_retval (*partid_to_nasids) (short, void *);
47681-};
47682+} __no_const;
47683
47684 extern struct xpc_interface xpc_interface;
47685
47686diff --git a/drivers/misc/sgi-xp/xp_main.c b/drivers/misc/sgi-xp/xp_main.c
47687index 01be66d..e3a0c7e 100644
47688--- a/drivers/misc/sgi-xp/xp_main.c
47689+++ b/drivers/misc/sgi-xp/xp_main.c
47690@@ -78,13 +78,13 @@ xpc_notloaded(void)
47691 }
47692
47693 struct xpc_interface xpc_interface = {
47694- (void (*)(int))xpc_notloaded,
47695- (void (*)(int))xpc_notloaded,
47696- (enum xp_retval(*)(short, int, u32, void *, u16))xpc_notloaded,
47697- (enum xp_retval(*)(short, int, u32, void *, u16, xpc_notify_func,
47698+ .connect = (void (*)(int))xpc_notloaded,
47699+ .disconnect = (void (*)(int))xpc_notloaded,
47700+ .send = (enum xp_retval(*)(short, int, u32, void *, u16))xpc_notloaded,
47701+ .send_notify = (enum xp_retval(*)(short, int, u32, void *, u16, xpc_notify_func,
47702 void *))xpc_notloaded,
47703- (void (*)(short, int, void *))xpc_notloaded,
47704- (enum xp_retval(*)(short, void *))xpc_notloaded
47705+ .received = (void (*)(short, int, void *))xpc_notloaded,
47706+ .partid_to_nasids = (enum xp_retval(*)(short, void *))xpc_notloaded
47707 };
47708 EXPORT_SYMBOL_GPL(xpc_interface);
47709
47710diff --git a/drivers/misc/sgi-xp/xpc.h b/drivers/misc/sgi-xp/xpc.h
47711index b94d5f7..7f494c5 100644
47712--- a/drivers/misc/sgi-xp/xpc.h
47713+++ b/drivers/misc/sgi-xp/xpc.h
47714@@ -835,6 +835,7 @@ struct xpc_arch_operations {
47715 void (*received_payload) (struct xpc_channel *, void *);
47716 void (*notify_senders_of_disconnect) (struct xpc_channel *);
47717 };
47718+typedef struct xpc_arch_operations __no_const xpc_arch_operations_no_const;
47719
47720 /* struct xpc_partition act_state values (for XPC HB) */
47721
47722@@ -876,7 +877,7 @@ extern struct xpc_registration xpc_registrations[];
47723 /* found in xpc_main.c */
47724 extern struct device *xpc_part;
47725 extern struct device *xpc_chan;
47726-extern struct xpc_arch_operations xpc_arch_ops;
47727+extern xpc_arch_operations_no_const xpc_arch_ops;
47728 extern int xpc_disengage_timelimit;
47729 extern int xpc_disengage_timedout;
47730 extern int xpc_activate_IRQ_rcvd;
47731diff --git a/drivers/misc/sgi-xp/xpc_main.c b/drivers/misc/sgi-xp/xpc_main.c
47732index 82dc574..8539ab2 100644
47733--- a/drivers/misc/sgi-xp/xpc_main.c
47734+++ b/drivers/misc/sgi-xp/xpc_main.c
47735@@ -166,7 +166,7 @@ static struct notifier_block xpc_die_notifier = {
47736 .notifier_call = xpc_system_die,
47737 };
47738
47739-struct xpc_arch_operations xpc_arch_ops;
47740+xpc_arch_operations_no_const xpc_arch_ops;
47741
47742 /*
47743 * Timer function to enforce the timelimit on the partition disengage.
47744@@ -1210,7 +1210,7 @@ xpc_system_die(struct notifier_block *nb, unsigned long event, void *_die_args)
47745
47746 if (((die_args->trapnr == X86_TRAP_MF) ||
47747 (die_args->trapnr == X86_TRAP_XF)) &&
47748- !user_mode_vm(die_args->regs))
47749+ !user_mode(die_args->regs))
47750 xpc_die_deactivate();
47751
47752 break;
47753diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
47754index ed2e71a..54c498e 100644
47755--- a/drivers/mmc/card/block.c
47756+++ b/drivers/mmc/card/block.c
47757@@ -577,7 +577,7 @@ static int mmc_blk_ioctl_cmd(struct block_device *bdev,
47758 if (idata->ic.postsleep_min_us)
47759 usleep_range(idata->ic.postsleep_min_us, idata->ic.postsleep_max_us);
47760
47761- if (copy_to_user(&(ic_ptr->response), cmd.resp, sizeof(cmd.resp))) {
47762+ if (copy_to_user(ic_ptr->response, cmd.resp, sizeof(cmd.resp))) {
47763 err = -EFAULT;
47764 goto cmd_rel_host;
47765 }
47766diff --git a/drivers/mmc/host/dw_mmc.h b/drivers/mmc/host/dw_mmc.h
47767index 18c4afe..43be71e 100644
47768--- a/drivers/mmc/host/dw_mmc.h
47769+++ b/drivers/mmc/host/dw_mmc.h
47770@@ -271,5 +271,5 @@ struct dw_mci_drv_data {
47771 void (*set_ios)(struct dw_mci *host, struct mmc_ios *ios);
47772 int (*parse_dt)(struct dw_mci *host);
47773 int (*execute_tuning)(struct dw_mci_slot *slot);
47774-};
47775+} __do_const;
47776 #endif /* _DW_MMC_H_ */
47777diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c
47778index 7fe1619..ae0781b 100644
47779--- a/drivers/mmc/host/mmci.c
47780+++ b/drivers/mmc/host/mmci.c
47781@@ -1630,7 +1630,9 @@ static int mmci_probe(struct amba_device *dev,
47782 mmc->caps |= MMC_CAP_CMD23;
47783
47784 if (variant->busy_detect) {
47785- mmci_ops.card_busy = mmci_card_busy;
47786+ pax_open_kernel();
47787+ *(void **)&mmci_ops.card_busy = mmci_card_busy;
47788+ pax_close_kernel();
47789 mmci_write_datactrlreg(host, MCI_ST_DPSM_BUSYMODE);
47790 mmc->caps |= MMC_CAP_WAIT_WHILE_BUSY;
47791 mmc->max_busy_timeout = 0;
47792diff --git a/drivers/mmc/host/omap_hsmmc.c b/drivers/mmc/host/omap_hsmmc.c
47793index f84cfb0..aebe5d6 100644
47794--- a/drivers/mmc/host/omap_hsmmc.c
47795+++ b/drivers/mmc/host/omap_hsmmc.c
47796@@ -2054,7 +2054,9 @@ static int omap_hsmmc_probe(struct platform_device *pdev)
47797
47798 if (host->pdata->controller_flags & OMAP_HSMMC_BROKEN_MULTIBLOCK_READ) {
47799 dev_info(&pdev->dev, "multiblock reads disabled due to 35xx erratum 2.1.1.128; MMC read performance may suffer\n");
47800- omap_hsmmc_ops.multi_io_quirk = omap_hsmmc_multi_io_quirk;
47801+ pax_open_kernel();
47802+ *(void **)&omap_hsmmc_ops.multi_io_quirk = omap_hsmmc_multi_io_quirk;
47803+ pax_close_kernel();
47804 }
47805
47806 pm_runtime_enable(host->dev);
47807diff --git a/drivers/mmc/host/sdhci-esdhc-imx.c b/drivers/mmc/host/sdhci-esdhc-imx.c
47808index 10ef824..88461a2 100644
47809--- a/drivers/mmc/host/sdhci-esdhc-imx.c
47810+++ b/drivers/mmc/host/sdhci-esdhc-imx.c
47811@@ -989,9 +989,12 @@ static int sdhci_esdhc_imx_probe(struct platform_device *pdev)
47812 host->mmc->caps |= MMC_CAP_1_8V_DDR;
47813 }
47814
47815- if (imx_data->socdata->flags & ESDHC_FLAG_MAN_TUNING)
47816- sdhci_esdhc_ops.platform_execute_tuning =
47817+ if (imx_data->socdata->flags & ESDHC_FLAG_MAN_TUNING) {
47818+ pax_open_kernel();
47819+ *(void **)&sdhci_esdhc_ops.platform_execute_tuning =
47820 esdhc_executing_tuning;
47821+ pax_close_kernel();
47822+ }
47823
47824 if (imx_data->socdata->flags & ESDHC_FLAG_STD_TUNING)
47825 writel(readl(host->ioaddr + ESDHC_TUNING_CTRL) |
47826diff --git a/drivers/mmc/host/sdhci-s3c.c b/drivers/mmc/host/sdhci-s3c.c
47827index c6d2dd7..81b1ca3 100644
47828--- a/drivers/mmc/host/sdhci-s3c.c
47829+++ b/drivers/mmc/host/sdhci-s3c.c
47830@@ -598,9 +598,11 @@ static int sdhci_s3c_probe(struct platform_device *pdev)
47831 * we can use overriding functions instead of default.
47832 */
47833 if (sc->no_divider) {
47834- sdhci_s3c_ops.set_clock = sdhci_cmu_set_clock;
47835- sdhci_s3c_ops.get_min_clock = sdhci_cmu_get_min_clock;
47836- sdhci_s3c_ops.get_max_clock = sdhci_cmu_get_max_clock;
47837+ pax_open_kernel();
47838+ *(void **)&sdhci_s3c_ops.set_clock = sdhci_cmu_set_clock;
47839+ *(void **)&sdhci_s3c_ops.get_min_clock = sdhci_cmu_get_min_clock;
47840+ *(void **)&sdhci_s3c_ops.get_max_clock = sdhci_cmu_get_max_clock;
47841+ pax_close_kernel();
47842 }
47843
47844 /* It supports additional host capabilities if needed */
47845diff --git a/drivers/mtd/chips/cfi_cmdset_0020.c b/drivers/mtd/chips/cfi_cmdset_0020.c
47846index 423666b..81ff5eb 100644
47847--- a/drivers/mtd/chips/cfi_cmdset_0020.c
47848+++ b/drivers/mtd/chips/cfi_cmdset_0020.c
47849@@ -666,7 +666,7 @@ cfi_staa_writev(struct mtd_info *mtd, const struct kvec *vecs,
47850 size_t totlen = 0, thislen;
47851 int ret = 0;
47852 size_t buflen = 0;
47853- static char *buffer;
47854+ char *buffer;
47855
47856 if (!ECCBUF_SIZE) {
47857 /* We should fall back to a general writev implementation.
47858diff --git a/drivers/mtd/nand/denali.c b/drivers/mtd/nand/denali.c
47859index f44c606..aa4e804 100644
47860--- a/drivers/mtd/nand/denali.c
47861+++ b/drivers/mtd/nand/denali.c
47862@@ -24,6 +24,7 @@
47863 #include <linux/slab.h>
47864 #include <linux/mtd/mtd.h>
47865 #include <linux/module.h>
47866+#include <linux/slab.h>
47867
47868 #include "denali.h"
47869
47870diff --git a/drivers/mtd/nand/gpmi-nand/gpmi-nand.c b/drivers/mtd/nand/gpmi-nand/gpmi-nand.c
47871index 33f3c3c..d6bbe6a 100644
47872--- a/drivers/mtd/nand/gpmi-nand/gpmi-nand.c
47873+++ b/drivers/mtd/nand/gpmi-nand/gpmi-nand.c
47874@@ -386,7 +386,7 @@ void prepare_data_dma(struct gpmi_nand_data *this, enum dma_data_direction dr)
47875
47876 /* first try to map the upper buffer directly */
47877 if (virt_addr_valid(this->upper_buf) &&
47878- !object_is_on_stack(this->upper_buf)) {
47879+ !object_starts_on_stack(this->upper_buf)) {
47880 sg_init_one(sgl, this->upper_buf, this->upper_len);
47881 ret = dma_map_sg(this->dev, sgl, 1, dr);
47882 if (ret == 0)
47883diff --git a/drivers/mtd/nftlmount.c b/drivers/mtd/nftlmount.c
47884index a5dfbfb..8042ab4 100644
47885--- a/drivers/mtd/nftlmount.c
47886+++ b/drivers/mtd/nftlmount.c
47887@@ -24,6 +24,7 @@
47888 #include <asm/errno.h>
47889 #include <linux/delay.h>
47890 #include <linux/slab.h>
47891+#include <linux/sched.h>
47892 #include <linux/mtd/mtd.h>
47893 #include <linux/mtd/nand.h>
47894 #include <linux/mtd/nftl.h>
47895diff --git a/drivers/mtd/sm_ftl.c b/drivers/mtd/sm_ftl.c
47896index c23184a..4115c41 100644
47897--- a/drivers/mtd/sm_ftl.c
47898+++ b/drivers/mtd/sm_ftl.c
47899@@ -56,7 +56,7 @@ static ssize_t sm_attr_show(struct device *dev, struct device_attribute *attr,
47900 #define SM_CIS_VENDOR_OFFSET 0x59
47901 static struct attribute_group *sm_create_sysfs_attributes(struct sm_ftl *ftl)
47902 {
47903- struct attribute_group *attr_group;
47904+ attribute_group_no_const *attr_group;
47905 struct attribute **attributes;
47906 struct sm_sysfs_attribute *vendor_attribute;
47907 char *vendor;
47908diff --git a/drivers/net/bonding/bond_netlink.c b/drivers/net/bonding/bond_netlink.c
47909index 7b11243..b3278a3 100644
47910--- a/drivers/net/bonding/bond_netlink.c
47911+++ b/drivers/net/bonding/bond_netlink.c
47912@@ -585,7 +585,7 @@ nla_put_failure:
47913 return -EMSGSIZE;
47914 }
47915
47916-struct rtnl_link_ops bond_link_ops __read_mostly = {
47917+struct rtnl_link_ops bond_link_ops = {
47918 .kind = "bond",
47919 .priv_size = sizeof(struct bonding),
47920 .setup = bond_setup,
47921diff --git a/drivers/net/caif/caif_hsi.c b/drivers/net/caif/caif_hsi.c
47922index b3b922a..80bba38 100644
47923--- a/drivers/net/caif/caif_hsi.c
47924+++ b/drivers/net/caif/caif_hsi.c
47925@@ -1444,7 +1444,7 @@ err:
47926 return -ENODEV;
47927 }
47928
47929-static struct rtnl_link_ops caif_hsi_link_ops __read_mostly = {
47930+static struct rtnl_link_ops caif_hsi_link_ops = {
47931 .kind = "cfhsi",
47932 .priv_size = sizeof(struct cfhsi),
47933 .setup = cfhsi_setup,
47934diff --git a/drivers/net/can/Kconfig b/drivers/net/can/Kconfig
47935index 58808f65..0bdc7b3 100644
47936--- a/drivers/net/can/Kconfig
47937+++ b/drivers/net/can/Kconfig
47938@@ -98,7 +98,7 @@ config CAN_JANZ_ICAN3
47939
47940 config CAN_FLEXCAN
47941 tristate "Support for Freescale FLEXCAN based chips"
47942- depends on ARM || PPC
47943+ depends on (ARM && CPU_LITTLE_ENDIAN) || PPC
47944 ---help---
47945 Say Y here if you want to support for Freescale FlexCAN.
47946
47947diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c
47948index b0f6924..59e9640 100644
47949--- a/drivers/net/can/dev.c
47950+++ b/drivers/net/can/dev.c
47951@@ -959,7 +959,7 @@ static int can_newlink(struct net *src_net, struct net_device *dev,
47952 return -EOPNOTSUPP;
47953 }
47954
47955-static struct rtnl_link_ops can_link_ops __read_mostly = {
47956+static struct rtnl_link_ops can_link_ops = {
47957 .kind = "can",
47958 .maxtype = IFLA_CAN_MAX,
47959 .policy = can_policy,
47960diff --git a/drivers/net/can/vcan.c b/drivers/net/can/vcan.c
47961index 674f367..ec3a31f 100644
47962--- a/drivers/net/can/vcan.c
47963+++ b/drivers/net/can/vcan.c
47964@@ -163,7 +163,7 @@ static void vcan_setup(struct net_device *dev)
47965 dev->destructor = free_netdev;
47966 }
47967
47968-static struct rtnl_link_ops vcan_link_ops __read_mostly = {
47969+static struct rtnl_link_ops vcan_link_ops = {
47970 .kind = "vcan",
47971 .setup = vcan_setup,
47972 };
47973diff --git a/drivers/net/dummy.c b/drivers/net/dummy.c
47974index 49adbf1..fff7ff8 100644
47975--- a/drivers/net/dummy.c
47976+++ b/drivers/net/dummy.c
47977@@ -164,7 +164,7 @@ static int dummy_validate(struct nlattr *tb[], struct nlattr *data[])
47978 return 0;
47979 }
47980
47981-static struct rtnl_link_ops dummy_link_ops __read_mostly = {
47982+static struct rtnl_link_ops dummy_link_ops = {
47983 .kind = DRV_NAME,
47984 .setup = dummy_setup,
47985 .validate = dummy_validate,
47986diff --git a/drivers/net/ethernet/8390/ax88796.c b/drivers/net/ethernet/8390/ax88796.c
47987index 0443654..4f0aa18 100644
47988--- a/drivers/net/ethernet/8390/ax88796.c
47989+++ b/drivers/net/ethernet/8390/ax88796.c
47990@@ -889,9 +889,11 @@ static int ax_probe(struct platform_device *pdev)
47991 if (ax->plat->reg_offsets)
47992 ei_local->reg_offset = ax->plat->reg_offsets;
47993 else {
47994+ resource_size_t _mem_size = mem_size;
47995+ do_div(_mem_size, 0x18);
47996 ei_local->reg_offset = ax->reg_offsets;
47997 for (ret = 0; ret < 0x18; ret++)
47998- ax->reg_offsets[ret] = (mem_size / 0x18) * ret;
47999+ ax->reg_offsets[ret] = _mem_size * ret;
48000 }
48001
48002 if (!request_mem_region(mem->start, mem_size, pdev->name)) {
48003diff --git a/drivers/net/ethernet/altera/altera_tse_main.c b/drivers/net/ethernet/altera/altera_tse_main.c
48004index 6725dc0..163549c 100644
48005--- a/drivers/net/ethernet/altera/altera_tse_main.c
48006+++ b/drivers/net/ethernet/altera/altera_tse_main.c
48007@@ -1216,7 +1216,7 @@ static int tse_shutdown(struct net_device *dev)
48008 return 0;
48009 }
48010
48011-static struct net_device_ops altera_tse_netdev_ops = {
48012+static net_device_ops_no_const altera_tse_netdev_ops __read_only = {
48013 .ndo_open = tse_open,
48014 .ndo_stop = tse_shutdown,
48015 .ndo_start_xmit = tse_start_xmit,
48016@@ -1453,11 +1453,13 @@ static int altera_tse_probe(struct platform_device *pdev)
48017 ndev->netdev_ops = &altera_tse_netdev_ops;
48018 altera_tse_set_ethtool_ops(ndev);
48019
48020+ pax_open_kernel();
48021 altera_tse_netdev_ops.ndo_set_rx_mode = tse_set_rx_mode;
48022
48023 if (priv->hash_filter)
48024 altera_tse_netdev_ops.ndo_set_rx_mode =
48025 tse_set_rx_mode_hashfilter;
48026+ pax_close_kernel();
48027
48028 /* Scatter/gather IO is not supported,
48029 * so it is turned off
48030diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-common.h b/drivers/net/ethernet/amd/xgbe/xgbe-common.h
48031index 29a0927..5a348e24 100644
48032--- a/drivers/net/ethernet/amd/xgbe/xgbe-common.h
48033+++ b/drivers/net/ethernet/amd/xgbe/xgbe-common.h
48034@@ -1122,14 +1122,14 @@ do { \
48035 * operations, everything works on mask values.
48036 */
48037 #define XMDIO_READ(_pdata, _mmd, _reg) \
48038- ((_pdata)->hw_if.read_mmd_regs((_pdata), 0, \
48039+ ((_pdata)->hw_if->read_mmd_regs((_pdata), 0, \
48040 MII_ADDR_C45 | (_mmd << 16) | ((_reg) & 0xffff)))
48041
48042 #define XMDIO_READ_BITS(_pdata, _mmd, _reg, _mask) \
48043 (XMDIO_READ((_pdata), _mmd, _reg) & _mask)
48044
48045 #define XMDIO_WRITE(_pdata, _mmd, _reg, _val) \
48046- ((_pdata)->hw_if.write_mmd_regs((_pdata), 0, \
48047+ ((_pdata)->hw_if->write_mmd_regs((_pdata), 0, \
48048 MII_ADDR_C45 | (_mmd << 16) | ((_reg) & 0xffff), (_val)))
48049
48050 #define XMDIO_WRITE_BITS(_pdata, _mmd, _reg, _mask, _val) \
48051diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-dcb.c b/drivers/net/ethernet/amd/xgbe/xgbe-dcb.c
48052index 8a50b01..39c1ad0 100644
48053--- a/drivers/net/ethernet/amd/xgbe/xgbe-dcb.c
48054+++ b/drivers/net/ethernet/amd/xgbe/xgbe-dcb.c
48055@@ -187,7 +187,7 @@ static int xgbe_dcb_ieee_setets(struct net_device *netdev,
48056
48057 memcpy(pdata->ets, ets, sizeof(*pdata->ets));
48058
48059- pdata->hw_if.config_dcb_tc(pdata);
48060+ pdata->hw_if->config_dcb_tc(pdata);
48061
48062 return 0;
48063 }
48064@@ -226,7 +226,7 @@ static int xgbe_dcb_ieee_setpfc(struct net_device *netdev,
48065
48066 memcpy(pdata->pfc, pfc, sizeof(*pdata->pfc));
48067
48068- pdata->hw_if.config_dcb_pfc(pdata);
48069+ pdata->hw_if->config_dcb_pfc(pdata);
48070
48071 return 0;
48072 }
48073diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-desc.c b/drivers/net/ethernet/amd/xgbe/xgbe-desc.c
48074index d81fc6b..6f8ab25 100644
48075--- a/drivers/net/ethernet/amd/xgbe/xgbe-desc.c
48076+++ b/drivers/net/ethernet/amd/xgbe/xgbe-desc.c
48077@@ -347,7 +347,7 @@ static int xgbe_map_rx_buffer(struct xgbe_prv_data *pdata,
48078
48079 static void xgbe_wrapper_tx_descriptor_init(struct xgbe_prv_data *pdata)
48080 {
48081- struct xgbe_hw_if *hw_if = &pdata->hw_if;
48082+ struct xgbe_hw_if *hw_if = pdata->hw_if;
48083 struct xgbe_channel *channel;
48084 struct xgbe_ring *ring;
48085 struct xgbe_ring_data *rdata;
48086@@ -388,7 +388,7 @@ static void xgbe_wrapper_tx_descriptor_init(struct xgbe_prv_data *pdata)
48087
48088 static void xgbe_wrapper_rx_descriptor_init(struct xgbe_prv_data *pdata)
48089 {
48090- struct xgbe_hw_if *hw_if = &pdata->hw_if;
48091+ struct xgbe_hw_if *hw_if = pdata->hw_if;
48092 struct xgbe_channel *channel;
48093 struct xgbe_ring *ring;
48094 struct xgbe_ring_desc *rdesc;
48095@@ -620,17 +620,12 @@ err_out:
48096 return 0;
48097 }
48098
48099-void xgbe_init_function_ptrs_desc(struct xgbe_desc_if *desc_if)
48100-{
48101- DBGPR("-->xgbe_init_function_ptrs_desc\n");
48102-
48103- desc_if->alloc_ring_resources = xgbe_alloc_ring_resources;
48104- desc_if->free_ring_resources = xgbe_free_ring_resources;
48105- desc_if->map_tx_skb = xgbe_map_tx_skb;
48106- desc_if->map_rx_buffer = xgbe_map_rx_buffer;
48107- desc_if->unmap_rdata = xgbe_unmap_rdata;
48108- desc_if->wrapper_tx_desc_init = xgbe_wrapper_tx_descriptor_init;
48109- desc_if->wrapper_rx_desc_init = xgbe_wrapper_rx_descriptor_init;
48110-
48111- DBGPR("<--xgbe_init_function_ptrs_desc\n");
48112-}
48113+const struct xgbe_desc_if default_xgbe_desc_if = {
48114+ .alloc_ring_resources = xgbe_alloc_ring_resources,
48115+ .free_ring_resources = xgbe_free_ring_resources,
48116+ .map_tx_skb = xgbe_map_tx_skb,
48117+ .map_rx_buffer = xgbe_map_rx_buffer,
48118+ .unmap_rdata = xgbe_unmap_rdata,
48119+ .wrapper_tx_desc_init = xgbe_wrapper_tx_descriptor_init,
48120+ .wrapper_rx_desc_init = xgbe_wrapper_rx_descriptor_init,
48121+};
48122diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
48123index 400757b..d8c53f6 100644
48124--- a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
48125+++ b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
48126@@ -2748,7 +2748,7 @@ static void xgbe_powerdown_rx(struct xgbe_prv_data *pdata)
48127
48128 static int xgbe_init(struct xgbe_prv_data *pdata)
48129 {
48130- struct xgbe_desc_if *desc_if = &pdata->desc_if;
48131+ struct xgbe_desc_if *desc_if = pdata->desc_if;
48132 int ret;
48133
48134 DBGPR("-->xgbe_init\n");
48135@@ -2813,108 +2813,103 @@ static int xgbe_init(struct xgbe_prv_data *pdata)
48136 return 0;
48137 }
48138
48139-void xgbe_init_function_ptrs_dev(struct xgbe_hw_if *hw_if)
48140-{
48141- DBGPR("-->xgbe_init_function_ptrs\n");
48142-
48143- hw_if->tx_complete = xgbe_tx_complete;
48144-
48145- hw_if->set_promiscuous_mode = xgbe_set_promiscuous_mode;
48146- hw_if->set_all_multicast_mode = xgbe_set_all_multicast_mode;
48147- hw_if->add_mac_addresses = xgbe_add_mac_addresses;
48148- hw_if->set_mac_address = xgbe_set_mac_address;
48149-
48150- hw_if->enable_rx_csum = xgbe_enable_rx_csum;
48151- hw_if->disable_rx_csum = xgbe_disable_rx_csum;
48152-
48153- hw_if->enable_rx_vlan_stripping = xgbe_enable_rx_vlan_stripping;
48154- hw_if->disable_rx_vlan_stripping = xgbe_disable_rx_vlan_stripping;
48155- hw_if->enable_rx_vlan_filtering = xgbe_enable_rx_vlan_filtering;
48156- hw_if->disable_rx_vlan_filtering = xgbe_disable_rx_vlan_filtering;
48157- hw_if->update_vlan_hash_table = xgbe_update_vlan_hash_table;
48158-
48159- hw_if->read_mmd_regs = xgbe_read_mmd_regs;
48160- hw_if->write_mmd_regs = xgbe_write_mmd_regs;
48161-
48162- hw_if->set_gmii_speed = xgbe_set_gmii_speed;
48163- hw_if->set_gmii_2500_speed = xgbe_set_gmii_2500_speed;
48164- hw_if->set_xgmii_speed = xgbe_set_xgmii_speed;
48165-
48166- hw_if->enable_tx = xgbe_enable_tx;
48167- hw_if->disable_tx = xgbe_disable_tx;
48168- hw_if->enable_rx = xgbe_enable_rx;
48169- hw_if->disable_rx = xgbe_disable_rx;
48170-
48171- hw_if->powerup_tx = xgbe_powerup_tx;
48172- hw_if->powerdown_tx = xgbe_powerdown_tx;
48173- hw_if->powerup_rx = xgbe_powerup_rx;
48174- hw_if->powerdown_rx = xgbe_powerdown_rx;
48175-
48176- hw_if->dev_xmit = xgbe_dev_xmit;
48177- hw_if->dev_read = xgbe_dev_read;
48178- hw_if->enable_int = xgbe_enable_int;
48179- hw_if->disable_int = xgbe_disable_int;
48180- hw_if->init = xgbe_init;
48181- hw_if->exit = xgbe_exit;
48182+const struct xgbe_hw_if default_xgbe_hw_if = {
48183+ .tx_complete = xgbe_tx_complete,
48184+
48185+ .set_promiscuous_mode = xgbe_set_promiscuous_mode,
48186+ .set_all_multicast_mode = xgbe_set_all_multicast_mode,
48187+ .add_mac_addresses = xgbe_add_mac_addresses,
48188+ .set_mac_address = xgbe_set_mac_address,
48189+
48190+ .enable_rx_csum = xgbe_enable_rx_csum,
48191+ .disable_rx_csum = xgbe_disable_rx_csum,
48192+
48193+ .enable_rx_vlan_stripping = xgbe_enable_rx_vlan_stripping,
48194+ .disable_rx_vlan_stripping = xgbe_disable_rx_vlan_stripping,
48195+ .enable_rx_vlan_filtering = xgbe_enable_rx_vlan_filtering,
48196+ .disable_rx_vlan_filtering = xgbe_disable_rx_vlan_filtering,
48197+ .update_vlan_hash_table = xgbe_update_vlan_hash_table,
48198+
48199+ .read_mmd_regs = xgbe_read_mmd_regs,
48200+ .write_mmd_regs = xgbe_write_mmd_regs,
48201+
48202+ .set_gmii_speed = xgbe_set_gmii_speed,
48203+ .set_gmii_2500_speed = xgbe_set_gmii_2500_speed,
48204+ .set_xgmii_speed = xgbe_set_xgmii_speed,
48205+
48206+ .enable_tx = xgbe_enable_tx,
48207+ .disable_tx = xgbe_disable_tx,
48208+ .enable_rx = xgbe_enable_rx,
48209+ .disable_rx = xgbe_disable_rx,
48210+
48211+ .powerup_tx = xgbe_powerup_tx,
48212+ .powerdown_tx = xgbe_powerdown_tx,
48213+ .powerup_rx = xgbe_powerup_rx,
48214+ .powerdown_rx = xgbe_powerdown_rx,
48215+
48216+ .dev_xmit = xgbe_dev_xmit,
48217+ .dev_read = xgbe_dev_read,
48218+ .enable_int = xgbe_enable_int,
48219+ .disable_int = xgbe_disable_int,
48220+ .init = xgbe_init,
48221+ .exit = xgbe_exit,
48222
48223 /* Descriptor related Sequences have to be initialized here */
48224- hw_if->tx_desc_init = xgbe_tx_desc_init;
48225- hw_if->rx_desc_init = xgbe_rx_desc_init;
48226- hw_if->tx_desc_reset = xgbe_tx_desc_reset;
48227- hw_if->rx_desc_reset = xgbe_rx_desc_reset;
48228- hw_if->is_last_desc = xgbe_is_last_desc;
48229- hw_if->is_context_desc = xgbe_is_context_desc;
48230- hw_if->tx_start_xmit = xgbe_tx_start_xmit;
48231+ .tx_desc_init = xgbe_tx_desc_init,
48232+ .rx_desc_init = xgbe_rx_desc_init,
48233+ .tx_desc_reset = xgbe_tx_desc_reset,
48234+ .rx_desc_reset = xgbe_rx_desc_reset,
48235+ .is_last_desc = xgbe_is_last_desc,
48236+ .is_context_desc = xgbe_is_context_desc,
48237+ .tx_start_xmit = xgbe_tx_start_xmit,
48238
48239 /* For FLOW ctrl */
48240- hw_if->config_tx_flow_control = xgbe_config_tx_flow_control;
48241- hw_if->config_rx_flow_control = xgbe_config_rx_flow_control;
48242+ .config_tx_flow_control = xgbe_config_tx_flow_control,
48243+ .config_rx_flow_control = xgbe_config_rx_flow_control,
48244
48245 /* For RX coalescing */
48246- hw_if->config_rx_coalesce = xgbe_config_rx_coalesce;
48247- hw_if->config_tx_coalesce = xgbe_config_tx_coalesce;
48248- hw_if->usec_to_riwt = xgbe_usec_to_riwt;
48249- hw_if->riwt_to_usec = xgbe_riwt_to_usec;
48250+ .config_rx_coalesce = xgbe_config_rx_coalesce,
48251+ .config_tx_coalesce = xgbe_config_tx_coalesce,
48252+ .usec_to_riwt = xgbe_usec_to_riwt,
48253+ .riwt_to_usec = xgbe_riwt_to_usec,
48254
48255 /* For RX and TX threshold config */
48256- hw_if->config_rx_threshold = xgbe_config_rx_threshold;
48257- hw_if->config_tx_threshold = xgbe_config_tx_threshold;
48258+ .config_rx_threshold = xgbe_config_rx_threshold,
48259+ .config_tx_threshold = xgbe_config_tx_threshold,
48260
48261 /* For RX and TX Store and Forward Mode config */
48262- hw_if->config_rsf_mode = xgbe_config_rsf_mode;
48263- hw_if->config_tsf_mode = xgbe_config_tsf_mode;
48264+ .config_rsf_mode = xgbe_config_rsf_mode,
48265+ .config_tsf_mode = xgbe_config_tsf_mode,
48266
48267 /* For TX DMA Operating on Second Frame config */
48268- hw_if->config_osp_mode = xgbe_config_osp_mode;
48269+ .config_osp_mode = xgbe_config_osp_mode,
48270
48271 /* For RX and TX PBL config */
48272- hw_if->config_rx_pbl_val = xgbe_config_rx_pbl_val;
48273- hw_if->get_rx_pbl_val = xgbe_get_rx_pbl_val;
48274- hw_if->config_tx_pbl_val = xgbe_config_tx_pbl_val;
48275- hw_if->get_tx_pbl_val = xgbe_get_tx_pbl_val;
48276- hw_if->config_pblx8 = xgbe_config_pblx8;
48277+ .config_rx_pbl_val = xgbe_config_rx_pbl_val,
48278+ .get_rx_pbl_val = xgbe_get_rx_pbl_val,
48279+ .config_tx_pbl_val = xgbe_config_tx_pbl_val,
48280+ .get_tx_pbl_val = xgbe_get_tx_pbl_val,
48281+ .config_pblx8 = xgbe_config_pblx8,
48282
48283 /* For MMC statistics support */
48284- hw_if->tx_mmc_int = xgbe_tx_mmc_int;
48285- hw_if->rx_mmc_int = xgbe_rx_mmc_int;
48286- hw_if->read_mmc_stats = xgbe_read_mmc_stats;
48287+ .tx_mmc_int = xgbe_tx_mmc_int,
48288+ .rx_mmc_int = xgbe_rx_mmc_int,
48289+ .read_mmc_stats = xgbe_read_mmc_stats,
48290
48291 /* For PTP config */
48292- hw_if->config_tstamp = xgbe_config_tstamp;
48293- hw_if->update_tstamp_addend = xgbe_update_tstamp_addend;
48294- hw_if->set_tstamp_time = xgbe_set_tstamp_time;
48295- hw_if->get_tstamp_time = xgbe_get_tstamp_time;
48296- hw_if->get_tx_tstamp = xgbe_get_tx_tstamp;
48297+ .config_tstamp = xgbe_config_tstamp,
48298+ .update_tstamp_addend = xgbe_update_tstamp_addend,
48299+ .set_tstamp_time = xgbe_set_tstamp_time,
48300+ .get_tstamp_time = xgbe_get_tstamp_time,
48301+ .get_tx_tstamp = xgbe_get_tx_tstamp,
48302
48303 /* For Data Center Bridging config */
48304- hw_if->config_dcb_tc = xgbe_config_dcb_tc;
48305- hw_if->config_dcb_pfc = xgbe_config_dcb_pfc;
48306+ .config_dcb_tc = xgbe_config_dcb_tc,
48307+ .config_dcb_pfc = xgbe_config_dcb_pfc,
48308
48309 /* For Receive Side Scaling */
48310- hw_if->enable_rss = xgbe_enable_rss;
48311- hw_if->disable_rss = xgbe_disable_rss;
48312- hw_if->set_rss_hash_key = xgbe_set_rss_hash_key;
48313- hw_if->set_rss_lookup_table = xgbe_set_rss_lookup_table;
48314-
48315- DBGPR("<--xgbe_init_function_ptrs\n");
48316-}
48317+ .enable_rss = xgbe_enable_rss,
48318+ .disable_rss = xgbe_disable_rss,
48319+ .set_rss_hash_key = xgbe_set_rss_hash_key,
48320+ .set_rss_lookup_table = xgbe_set_rss_lookup_table,
48321+};
48322diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
48323index 885b02b..4b31a4c 100644
48324--- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
48325+++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
48326@@ -244,7 +244,7 @@ static int xgbe_maybe_stop_tx_queue(struct xgbe_channel *channel,
48327 * support, tell it now
48328 */
48329 if (ring->tx.xmit_more)
48330- pdata->hw_if.tx_start_xmit(channel, ring);
48331+ pdata->hw_if->tx_start_xmit(channel, ring);
48332
48333 return NETDEV_TX_BUSY;
48334 }
48335@@ -272,7 +272,7 @@ static int xgbe_calc_rx_buf_size(struct net_device *netdev, unsigned int mtu)
48336
48337 static void xgbe_enable_rx_tx_ints(struct xgbe_prv_data *pdata)
48338 {
48339- struct xgbe_hw_if *hw_if = &pdata->hw_if;
48340+ struct xgbe_hw_if *hw_if = pdata->hw_if;
48341 struct xgbe_channel *channel;
48342 enum xgbe_int int_id;
48343 unsigned int i;
48344@@ -294,7 +294,7 @@ static void xgbe_enable_rx_tx_ints(struct xgbe_prv_data *pdata)
48345
48346 static void xgbe_disable_rx_tx_ints(struct xgbe_prv_data *pdata)
48347 {
48348- struct xgbe_hw_if *hw_if = &pdata->hw_if;
48349+ struct xgbe_hw_if *hw_if = pdata->hw_if;
48350 struct xgbe_channel *channel;
48351 enum xgbe_int int_id;
48352 unsigned int i;
48353@@ -317,7 +317,7 @@ static void xgbe_disable_rx_tx_ints(struct xgbe_prv_data *pdata)
48354 static irqreturn_t xgbe_isr(int irq, void *data)
48355 {
48356 struct xgbe_prv_data *pdata = data;
48357- struct xgbe_hw_if *hw_if = &pdata->hw_if;
48358+ struct xgbe_hw_if *hw_if = pdata->hw_if;
48359 struct xgbe_channel *channel;
48360 unsigned int dma_isr, dma_ch_isr;
48361 unsigned int mac_isr, mac_tssr;
48362@@ -673,7 +673,7 @@ static void xgbe_free_irqs(struct xgbe_prv_data *pdata)
48363
48364 void xgbe_init_tx_coalesce(struct xgbe_prv_data *pdata)
48365 {
48366- struct xgbe_hw_if *hw_if = &pdata->hw_if;
48367+ struct xgbe_hw_if *hw_if = pdata->hw_if;
48368
48369 DBGPR("-->xgbe_init_tx_coalesce\n");
48370
48371@@ -687,7 +687,7 @@ void xgbe_init_tx_coalesce(struct xgbe_prv_data *pdata)
48372
48373 void xgbe_init_rx_coalesce(struct xgbe_prv_data *pdata)
48374 {
48375- struct xgbe_hw_if *hw_if = &pdata->hw_if;
48376+ struct xgbe_hw_if *hw_if = pdata->hw_if;
48377
48378 DBGPR("-->xgbe_init_rx_coalesce\n");
48379
48380@@ -701,7 +701,7 @@ void xgbe_init_rx_coalesce(struct xgbe_prv_data *pdata)
48381
48382 static void xgbe_free_tx_data(struct xgbe_prv_data *pdata)
48383 {
48384- struct xgbe_desc_if *desc_if = &pdata->desc_if;
48385+ struct xgbe_desc_if *desc_if = pdata->desc_if;
48386 struct xgbe_channel *channel;
48387 struct xgbe_ring *ring;
48388 struct xgbe_ring_data *rdata;
48389@@ -726,7 +726,7 @@ static void xgbe_free_tx_data(struct xgbe_prv_data *pdata)
48390
48391 static void xgbe_free_rx_data(struct xgbe_prv_data *pdata)
48392 {
48393- struct xgbe_desc_if *desc_if = &pdata->desc_if;
48394+ struct xgbe_desc_if *desc_if = pdata->desc_if;
48395 struct xgbe_channel *channel;
48396 struct xgbe_ring *ring;
48397 struct xgbe_ring_data *rdata;
48398@@ -752,7 +752,7 @@ static void xgbe_free_rx_data(struct xgbe_prv_data *pdata)
48399 static void xgbe_adjust_link(struct net_device *netdev)
48400 {
48401 struct xgbe_prv_data *pdata = netdev_priv(netdev);
48402- struct xgbe_hw_if *hw_if = &pdata->hw_if;
48403+ struct xgbe_hw_if *hw_if = pdata->hw_if;
48404 struct phy_device *phydev = pdata->phydev;
48405 int new_state = 0;
48406
48407@@ -860,7 +860,7 @@ static void xgbe_phy_exit(struct xgbe_prv_data *pdata)
48408 int xgbe_powerdown(struct net_device *netdev, unsigned int caller)
48409 {
48410 struct xgbe_prv_data *pdata = netdev_priv(netdev);
48411- struct xgbe_hw_if *hw_if = &pdata->hw_if;
48412+ struct xgbe_hw_if *hw_if = pdata->hw_if;
48413 unsigned long flags;
48414
48415 DBGPR("-->xgbe_powerdown\n");
48416@@ -898,7 +898,7 @@ int xgbe_powerdown(struct net_device *netdev, unsigned int caller)
48417 int xgbe_powerup(struct net_device *netdev, unsigned int caller)
48418 {
48419 struct xgbe_prv_data *pdata = netdev_priv(netdev);
48420- struct xgbe_hw_if *hw_if = &pdata->hw_if;
48421+ struct xgbe_hw_if *hw_if = pdata->hw_if;
48422 unsigned long flags;
48423
48424 DBGPR("-->xgbe_powerup\n");
48425@@ -935,7 +935,7 @@ int xgbe_powerup(struct net_device *netdev, unsigned int caller)
48426
48427 static int xgbe_start(struct xgbe_prv_data *pdata)
48428 {
48429- struct xgbe_hw_if *hw_if = &pdata->hw_if;
48430+ struct xgbe_hw_if *hw_if = pdata->hw_if;
48431 struct net_device *netdev = pdata->netdev;
48432 int ret;
48433
48434@@ -976,7 +976,7 @@ err_napi:
48435
48436 static void xgbe_stop(struct xgbe_prv_data *pdata)
48437 {
48438- struct xgbe_hw_if *hw_if = &pdata->hw_if;
48439+ struct xgbe_hw_if *hw_if = pdata->hw_if;
48440 struct xgbe_channel *channel;
48441 struct net_device *netdev = pdata->netdev;
48442 struct netdev_queue *txq;
48443@@ -1203,7 +1203,7 @@ static int xgbe_set_hwtstamp_settings(struct xgbe_prv_data *pdata,
48444 return -ERANGE;
48445 }
48446
48447- pdata->hw_if.config_tstamp(pdata, mac_tscr);
48448+ pdata->hw_if->config_tstamp(pdata, mac_tscr);
48449
48450 memcpy(&pdata->tstamp_config, &config, sizeof(config));
48451
48452@@ -1352,7 +1352,7 @@ static void xgbe_packet_info(struct xgbe_prv_data *pdata,
48453 static int xgbe_open(struct net_device *netdev)
48454 {
48455 struct xgbe_prv_data *pdata = netdev_priv(netdev);
48456- struct xgbe_desc_if *desc_if = &pdata->desc_if;
48457+ struct xgbe_desc_if *desc_if = pdata->desc_if;
48458 int ret;
48459
48460 DBGPR("-->xgbe_open\n");
48461@@ -1424,7 +1424,7 @@ err_phy_init:
48462 static int xgbe_close(struct net_device *netdev)
48463 {
48464 struct xgbe_prv_data *pdata = netdev_priv(netdev);
48465- struct xgbe_desc_if *desc_if = &pdata->desc_if;
48466+ struct xgbe_desc_if *desc_if = pdata->desc_if;
48467
48468 DBGPR("-->xgbe_close\n");
48469
48470@@ -1452,8 +1452,8 @@ static int xgbe_close(struct net_device *netdev)
48471 static int xgbe_xmit(struct sk_buff *skb, struct net_device *netdev)
48472 {
48473 struct xgbe_prv_data *pdata = netdev_priv(netdev);
48474- struct xgbe_hw_if *hw_if = &pdata->hw_if;
48475- struct xgbe_desc_if *desc_if = &pdata->desc_if;
48476+ struct xgbe_hw_if *hw_if = pdata->hw_if;
48477+ struct xgbe_desc_if *desc_if = pdata->desc_if;
48478 struct xgbe_channel *channel;
48479 struct xgbe_ring *ring;
48480 struct xgbe_packet_data *packet;
48481@@ -1521,7 +1521,7 @@ tx_netdev_return:
48482 static void xgbe_set_rx_mode(struct net_device *netdev)
48483 {
48484 struct xgbe_prv_data *pdata = netdev_priv(netdev);
48485- struct xgbe_hw_if *hw_if = &pdata->hw_if;
48486+ struct xgbe_hw_if *hw_if = pdata->hw_if;
48487 unsigned int pr_mode, am_mode;
48488
48489 DBGPR("-->xgbe_set_rx_mode\n");
48490@@ -1540,7 +1540,7 @@ static void xgbe_set_rx_mode(struct net_device *netdev)
48491 static int xgbe_set_mac_address(struct net_device *netdev, void *addr)
48492 {
48493 struct xgbe_prv_data *pdata = netdev_priv(netdev);
48494- struct xgbe_hw_if *hw_if = &pdata->hw_if;
48495+ struct xgbe_hw_if *hw_if = pdata->hw_if;
48496 struct sockaddr *saddr = addr;
48497
48498 DBGPR("-->xgbe_set_mac_address\n");
48499@@ -1607,7 +1607,7 @@ static struct rtnl_link_stats64 *xgbe_get_stats64(struct net_device *netdev,
48500
48501 DBGPR("-->%s\n", __func__);
48502
48503- pdata->hw_if.read_mmc_stats(pdata);
48504+ pdata->hw_if->read_mmc_stats(pdata);
48505
48506 s->rx_packets = pstats->rxframecount_gb;
48507 s->rx_bytes = pstats->rxoctetcount_gb;
48508@@ -1634,7 +1634,7 @@ static int xgbe_vlan_rx_add_vid(struct net_device *netdev, __be16 proto,
48509 u16 vid)
48510 {
48511 struct xgbe_prv_data *pdata = netdev_priv(netdev);
48512- struct xgbe_hw_if *hw_if = &pdata->hw_if;
48513+ struct xgbe_hw_if *hw_if = pdata->hw_if;
48514
48515 DBGPR("-->%s\n", __func__);
48516
48517@@ -1650,7 +1650,7 @@ static int xgbe_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto,
48518 u16 vid)
48519 {
48520 struct xgbe_prv_data *pdata = netdev_priv(netdev);
48521- struct xgbe_hw_if *hw_if = &pdata->hw_if;
48522+ struct xgbe_hw_if *hw_if = pdata->hw_if;
48523
48524 DBGPR("-->%s\n", __func__);
48525
48526@@ -1716,7 +1716,7 @@ static int xgbe_set_features(struct net_device *netdev,
48527 netdev_features_t features)
48528 {
48529 struct xgbe_prv_data *pdata = netdev_priv(netdev);
48530- struct xgbe_hw_if *hw_if = &pdata->hw_if;
48531+ struct xgbe_hw_if *hw_if = pdata->hw_if;
48532 netdev_features_t rxhash, rxcsum, rxvlan, rxvlan_filter;
48533 int ret = 0;
48534
48535@@ -1781,8 +1781,8 @@ struct net_device_ops *xgbe_get_netdev_ops(void)
48536 static void xgbe_rx_refresh(struct xgbe_channel *channel)
48537 {
48538 struct xgbe_prv_data *pdata = channel->pdata;
48539- struct xgbe_hw_if *hw_if = &pdata->hw_if;
48540- struct xgbe_desc_if *desc_if = &pdata->desc_if;
48541+ struct xgbe_hw_if *hw_if = pdata->hw_if;
48542+ struct xgbe_desc_if *desc_if = pdata->desc_if;
48543 struct xgbe_ring *ring = channel->rx_ring;
48544 struct xgbe_ring_data *rdata;
48545
48546@@ -1835,8 +1835,8 @@ static struct sk_buff *xgbe_create_skb(struct xgbe_prv_data *pdata,
48547 static int xgbe_tx_poll(struct xgbe_channel *channel)
48548 {
48549 struct xgbe_prv_data *pdata = channel->pdata;
48550- struct xgbe_hw_if *hw_if = &pdata->hw_if;
48551- struct xgbe_desc_if *desc_if = &pdata->desc_if;
48552+ struct xgbe_hw_if *hw_if = pdata->hw_if;
48553+ struct xgbe_desc_if *desc_if = pdata->desc_if;
48554 struct xgbe_ring *ring = channel->tx_ring;
48555 struct xgbe_ring_data *rdata;
48556 struct xgbe_ring_desc *rdesc;
48557@@ -1901,7 +1901,7 @@ static int xgbe_tx_poll(struct xgbe_channel *channel)
48558 static int xgbe_rx_poll(struct xgbe_channel *channel, int budget)
48559 {
48560 struct xgbe_prv_data *pdata = channel->pdata;
48561- struct xgbe_hw_if *hw_if = &pdata->hw_if;
48562+ struct xgbe_hw_if *hw_if = pdata->hw_if;
48563 struct xgbe_ring *ring = channel->rx_ring;
48564 struct xgbe_ring_data *rdata;
48565 struct xgbe_packet_data *packet;
48566diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c b/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
48567index ebf4893..a8f51c6 100644
48568--- a/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
48569+++ b/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
48570@@ -203,7 +203,7 @@ static void xgbe_get_ethtool_stats(struct net_device *netdev,
48571
48572 DBGPR("-->%s\n", __func__);
48573
48574- pdata->hw_if.read_mmc_stats(pdata);
48575+ pdata->hw_if->read_mmc_stats(pdata);
48576 for (i = 0; i < XGBE_STATS_COUNT; i++) {
48577 stat = (u8 *)pdata + xgbe_gstring_stats[i].stat_offset;
48578 *data++ = *(u64 *)stat;
48579@@ -378,7 +378,7 @@ static int xgbe_get_coalesce(struct net_device *netdev,
48580 struct ethtool_coalesce *ec)
48581 {
48582 struct xgbe_prv_data *pdata = netdev_priv(netdev);
48583- struct xgbe_hw_if *hw_if = &pdata->hw_if;
48584+ struct xgbe_hw_if *hw_if = pdata->hw_if;
48585 unsigned int riwt;
48586
48587 DBGPR("-->xgbe_get_coalesce\n");
48588@@ -401,7 +401,7 @@ static int xgbe_set_coalesce(struct net_device *netdev,
48589 struct ethtool_coalesce *ec)
48590 {
48591 struct xgbe_prv_data *pdata = netdev_priv(netdev);
48592- struct xgbe_hw_if *hw_if = &pdata->hw_if;
48593+ struct xgbe_hw_if *hw_if = pdata->hw_if;
48594 unsigned int rx_frames, rx_riwt, rx_usecs;
48595 unsigned int tx_frames, tx_usecs;
48596
48597@@ -536,7 +536,7 @@ static int xgbe_set_rxfh(struct net_device *netdev, const u32 *indir,
48598 const u8 *key, const u8 hfunc)
48599 {
48600 struct xgbe_prv_data *pdata = netdev_priv(netdev);
48601- struct xgbe_hw_if *hw_if = &pdata->hw_if;
48602+ struct xgbe_hw_if *hw_if = pdata->hw_if;
48603 unsigned int ret;
48604
48605 if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)
48606diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-main.c b/drivers/net/ethernet/amd/xgbe/xgbe-main.c
48607index 32dd651..225cca3 100644
48608--- a/drivers/net/ethernet/amd/xgbe/xgbe-main.c
48609+++ b/drivers/net/ethernet/amd/xgbe/xgbe-main.c
48610@@ -159,12 +159,6 @@ static void xgbe_default_config(struct xgbe_prv_data *pdata)
48611 DBGPR("<--xgbe_default_config\n");
48612 }
48613
48614-static void xgbe_init_all_fptrs(struct xgbe_prv_data *pdata)
48615-{
48616- xgbe_init_function_ptrs_dev(&pdata->hw_if);
48617- xgbe_init_function_ptrs_desc(&pdata->desc_if);
48618-}
48619-
48620 #ifdef CONFIG_ACPI
48621 static int xgbe_acpi_support(struct xgbe_prv_data *pdata)
48622 {
48623@@ -396,9 +390,8 @@ static int xgbe_probe(struct platform_device *pdev)
48624 memcpy(netdev->dev_addr, pdata->mac_addr, netdev->addr_len);
48625
48626 /* Set all the function pointers */
48627- xgbe_init_all_fptrs(pdata);
48628- hw_if = &pdata->hw_if;
48629- desc_if = &pdata->desc_if;
48630+ hw_if = pdata->hw_if = &default_xgbe_hw_if;
48631+ desc_if = pdata->desc_if = &default_xgbe_desc_if;
48632
48633 /* Issue software reset to device */
48634 hw_if->exit(pdata);
48635diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
48636index 59e267f..0842a88 100644
48637--- a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
48638+++ b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
48639@@ -126,7 +126,7 @@
48640 static int xgbe_mdio_read(struct mii_bus *mii, int prtad, int mmd_reg)
48641 {
48642 struct xgbe_prv_data *pdata = mii->priv;
48643- struct xgbe_hw_if *hw_if = &pdata->hw_if;
48644+ struct xgbe_hw_if *hw_if = pdata->hw_if;
48645 int mmd_data;
48646
48647 DBGPR_MDIO("-->xgbe_mdio_read: prtad=%#x mmd_reg=%#x\n",
48648@@ -143,7 +143,7 @@ static int xgbe_mdio_write(struct mii_bus *mii, int prtad, int mmd_reg,
48649 u16 mmd_val)
48650 {
48651 struct xgbe_prv_data *pdata = mii->priv;
48652- struct xgbe_hw_if *hw_if = &pdata->hw_if;
48653+ struct xgbe_hw_if *hw_if = pdata->hw_if;
48654 int mmd_data = mmd_val;
48655
48656 DBGPR_MDIO("-->xgbe_mdio_write: prtad=%#x mmd_reg=%#x mmd_data=%#x\n",
48657diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-ptp.c b/drivers/net/ethernet/amd/xgbe/xgbe-ptp.c
48658index f326178..8bd7daf 100644
48659--- a/drivers/net/ethernet/amd/xgbe/xgbe-ptp.c
48660+++ b/drivers/net/ethernet/amd/xgbe/xgbe-ptp.c
48661@@ -129,7 +129,7 @@ static cycle_t xgbe_cc_read(const struct cyclecounter *cc)
48662 tstamp_cc);
48663 u64 nsec;
48664
48665- nsec = pdata->hw_if.get_tstamp_time(pdata);
48666+ nsec = pdata->hw_if->get_tstamp_time(pdata);
48667
48668 return nsec;
48669 }
48670@@ -158,7 +158,7 @@ static int xgbe_adjfreq(struct ptp_clock_info *info, s32 delta)
48671
48672 spin_lock_irqsave(&pdata->tstamp_lock, flags);
48673
48674- pdata->hw_if.update_tstamp_addend(pdata, addend);
48675+ pdata->hw_if->update_tstamp_addend(pdata, addend);
48676
48677 spin_unlock_irqrestore(&pdata->tstamp_lock, flags);
48678
48679diff --git a/drivers/net/ethernet/amd/xgbe/xgbe.h b/drivers/net/ethernet/amd/xgbe/xgbe.h
48680index 13e8f95..1d8beef 100644
48681--- a/drivers/net/ethernet/amd/xgbe/xgbe.h
48682+++ b/drivers/net/ethernet/amd/xgbe/xgbe.h
48683@@ -675,8 +675,8 @@ struct xgbe_prv_data {
48684 int dev_irq;
48685 unsigned int per_channel_irq;
48686
48687- struct xgbe_hw_if hw_if;
48688- struct xgbe_desc_if desc_if;
48689+ struct xgbe_hw_if *hw_if;
48690+ struct xgbe_desc_if *desc_if;
48691
48692 /* AXI DMA settings */
48693 unsigned int coherent;
48694@@ -798,6 +798,9 @@ struct xgbe_prv_data {
48695 #endif
48696 };
48697
48698+extern const struct xgbe_hw_if default_xgbe_hw_if;
48699+extern const struct xgbe_desc_if default_xgbe_desc_if;
48700+
48701 /* Function prototypes*/
48702
48703 void xgbe_init_function_ptrs_dev(struct xgbe_hw_if *);
48704diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
48705index adcacda..fa6e0ae 100644
48706--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
48707+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
48708@@ -1065,7 +1065,7 @@ static inline u8 bnx2x_get_path_func_num(struct bnx2x *bp)
48709 static inline void bnx2x_init_bp_objs(struct bnx2x *bp)
48710 {
48711 /* RX_MODE controlling object */
48712- bnx2x_init_rx_mode_obj(bp, &bp->rx_mode_obj);
48713+ bnx2x_init_rx_mode_obj(bp);
48714
48715 /* multicast configuration controlling object */
48716 bnx2x_init_mcast_obj(bp, &bp->mcast_obj, bp->fp->cl_id, bp->fp->cid,
48717diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
48718index 07cdf9b..b08ecc7 100644
48719--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
48720+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
48721@@ -2329,15 +2329,14 @@ int bnx2x_config_rx_mode(struct bnx2x *bp,
48722 return rc;
48723 }
48724
48725-void bnx2x_init_rx_mode_obj(struct bnx2x *bp,
48726- struct bnx2x_rx_mode_obj *o)
48727+void bnx2x_init_rx_mode_obj(struct bnx2x *bp)
48728 {
48729 if (CHIP_IS_E1x(bp)) {
48730- o->wait_comp = bnx2x_empty_rx_mode_wait;
48731- o->config_rx_mode = bnx2x_set_rx_mode_e1x;
48732+ bp->rx_mode_obj.wait_comp = bnx2x_empty_rx_mode_wait;
48733+ bp->rx_mode_obj.config_rx_mode = bnx2x_set_rx_mode_e1x;
48734 } else {
48735- o->wait_comp = bnx2x_wait_rx_mode_comp_e2;
48736- o->config_rx_mode = bnx2x_set_rx_mode_e2;
48737+ bp->rx_mode_obj.wait_comp = bnx2x_wait_rx_mode_comp_e2;
48738+ bp->rx_mode_obj.config_rx_mode = bnx2x_set_rx_mode_e2;
48739 }
48740 }
48741
48742diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
48743index 86baecb..ff3bb46 100644
48744--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
48745+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
48746@@ -1411,8 +1411,7 @@ int bnx2x_vlan_mac_move(struct bnx2x *bp,
48747
48748 /********************* RX MODE ****************/
48749
48750-void bnx2x_init_rx_mode_obj(struct bnx2x *bp,
48751- struct bnx2x_rx_mode_obj *o);
48752+void bnx2x_init_rx_mode_obj(struct bnx2x *bp);
48753
48754 /**
48755 * bnx2x_config_rx_mode - Send and RX_MODE ramrod according to the provided parameters.
48756diff --git a/drivers/net/ethernet/broadcom/tg3.h b/drivers/net/ethernet/broadcom/tg3.h
48757index 31c9f82..e65e986 100644
48758--- a/drivers/net/ethernet/broadcom/tg3.h
48759+++ b/drivers/net/ethernet/broadcom/tg3.h
48760@@ -150,6 +150,7 @@
48761 #define CHIPREV_ID_5750_A0 0x4000
48762 #define CHIPREV_ID_5750_A1 0x4001
48763 #define CHIPREV_ID_5750_A3 0x4003
48764+#define CHIPREV_ID_5750_C1 0x4201
48765 #define CHIPREV_ID_5750_C2 0x4202
48766 #define CHIPREV_ID_5752_A0_HW 0x5000
48767 #define CHIPREV_ID_5752_A0 0x6000
48768diff --git a/drivers/net/ethernet/brocade/bna/bna_enet.c b/drivers/net/ethernet/brocade/bna/bna_enet.c
48769index 903466e..b285864 100644
48770--- a/drivers/net/ethernet/brocade/bna/bna_enet.c
48771+++ b/drivers/net/ethernet/brocade/bna/bna_enet.c
48772@@ -1693,10 +1693,10 @@ bna_cb_ioceth_reset(void *arg)
48773 }
48774
48775 static struct bfa_ioc_cbfn bna_ioceth_cbfn = {
48776- bna_cb_ioceth_enable,
48777- bna_cb_ioceth_disable,
48778- bna_cb_ioceth_hbfail,
48779- bna_cb_ioceth_reset
48780+ .enable_cbfn = bna_cb_ioceth_enable,
48781+ .disable_cbfn = bna_cb_ioceth_disable,
48782+ .hbfail_cbfn = bna_cb_ioceth_hbfail,
48783+ .reset_cbfn = bna_cb_ioceth_reset
48784 };
48785
48786 static void bna_attr_init(struct bna_ioceth *ioceth)
48787diff --git a/drivers/net/ethernet/chelsio/cxgb3/l2t.h b/drivers/net/ethernet/chelsio/cxgb3/l2t.h
48788index 8cffcdf..aadf043 100644
48789--- a/drivers/net/ethernet/chelsio/cxgb3/l2t.h
48790+++ b/drivers/net/ethernet/chelsio/cxgb3/l2t.h
48791@@ -87,7 +87,7 @@ typedef void (*arp_failure_handler_func)(struct t3cdev * dev,
48792 */
48793 struct l2t_skb_cb {
48794 arp_failure_handler_func arp_failure_handler;
48795-};
48796+} __no_const;
48797
48798 #define L2T_SKB_CB(skb) ((struct l2t_skb_cb *)(skb)->cb)
48799
48800diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
48801index d929951..a2c23f5 100644
48802--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
48803+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
48804@@ -2215,7 +2215,7 @@ static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
48805
48806 int i;
48807 struct adapter *ap = netdev2adap(dev);
48808- static const unsigned int *reg_ranges;
48809+ const unsigned int *reg_ranges;
48810 int arr_size = 0, buf_size = 0;
48811
48812 if (is_t4(ap->params.chip)) {
48813diff --git a/drivers/net/ethernet/dec/tulip/de4x5.c b/drivers/net/ethernet/dec/tulip/de4x5.c
48814index badff18..e15c4ec 100644
48815--- a/drivers/net/ethernet/dec/tulip/de4x5.c
48816+++ b/drivers/net/ethernet/dec/tulip/de4x5.c
48817@@ -5373,7 +5373,7 @@ de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
48818 for (i=0; i<ETH_ALEN; i++) {
48819 tmp.addr[i] = dev->dev_addr[i];
48820 }
48821- if (copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
48822+ if (ioc->len > sizeof tmp.addr || copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
48823 break;
48824
48825 case DE4X5_SET_HWADDR: /* Set the hardware address */
48826@@ -5413,7 +5413,7 @@ de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
48827 spin_lock_irqsave(&lp->lock, flags);
48828 memcpy(&statbuf, &lp->pktStats, ioc->len);
48829 spin_unlock_irqrestore(&lp->lock, flags);
48830- if (copy_to_user(ioc->data, &statbuf, ioc->len))
48831+ if (ioc->len > sizeof statbuf || copy_to_user(ioc->data, &statbuf, ioc->len))
48832 return -EFAULT;
48833 break;
48834 }
48835diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
48836index e6b790f..051ba2d 100644
48837--- a/drivers/net/ethernet/emulex/benet/be_main.c
48838+++ b/drivers/net/ethernet/emulex/benet/be_main.c
48839@@ -536,7 +536,7 @@ static void accumulate_16bit_val(u32 *acc, u16 val)
48840
48841 if (wrapped)
48842 newacc += 65536;
48843- ACCESS_ONCE(*acc) = newacc;
48844+ ACCESS_ONCE_RW(*acc) = newacc;
48845 }
48846
48847 static void populate_erx_stats(struct be_adapter *adapter,
48848diff --git a/drivers/net/ethernet/faraday/ftgmac100.c b/drivers/net/ethernet/faraday/ftgmac100.c
48849index 6d0c5d5..55be363 100644
48850--- a/drivers/net/ethernet/faraday/ftgmac100.c
48851+++ b/drivers/net/ethernet/faraday/ftgmac100.c
48852@@ -30,6 +30,8 @@
48853 #include <linux/netdevice.h>
48854 #include <linux/phy.h>
48855 #include <linux/platform_device.h>
48856+#include <linux/interrupt.h>
48857+#include <linux/irqreturn.h>
48858 #include <net/ip.h>
48859
48860 #include "ftgmac100.h"
48861diff --git a/drivers/net/ethernet/faraday/ftmac100.c b/drivers/net/ethernet/faraday/ftmac100.c
48862index dce5f7b..2433466 100644
48863--- a/drivers/net/ethernet/faraday/ftmac100.c
48864+++ b/drivers/net/ethernet/faraday/ftmac100.c
48865@@ -31,6 +31,8 @@
48866 #include <linux/module.h>
48867 #include <linux/netdevice.h>
48868 #include <linux/platform_device.h>
48869+#include <linux/interrupt.h>
48870+#include <linux/irqreturn.h>
48871
48872 #include "ftmac100.h"
48873
48874diff --git a/drivers/net/ethernet/intel/i40e/i40e_ptp.c b/drivers/net/ethernet/intel/i40e/i40e_ptp.c
48875index fabcfa1..188fd22 100644
48876--- a/drivers/net/ethernet/intel/i40e/i40e_ptp.c
48877+++ b/drivers/net/ethernet/intel/i40e/i40e_ptp.c
48878@@ -419,7 +419,7 @@ void i40e_ptp_set_increment(struct i40e_pf *pf)
48879 wr32(hw, I40E_PRTTSYN_INC_H, incval >> 32);
48880
48881 /* Update the base adjustement value. */
48882- ACCESS_ONCE(pf->ptp_base_adj) = incval;
48883+ ACCESS_ONCE_RW(pf->ptp_base_adj) = incval;
48884 smp_mb(); /* Force the above update. */
48885 }
48886
48887diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
48888index 79c00f5..8da39f6 100644
48889--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
48890+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
48891@@ -785,7 +785,7 @@ void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter)
48892 }
48893
48894 /* update the base incval used to calculate frequency adjustment */
48895- ACCESS_ONCE(adapter->base_incval) = incval;
48896+ ACCESS_ONCE_RW(adapter->base_incval) = incval;
48897 smp_mb();
48898
48899 /* need lock to prevent incorrect read while modifying cyclecounter */
48900diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
48901index 8c234ec..757331f 100644
48902--- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
48903+++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
48904@@ -468,8 +468,8 @@ static bool mlx4_en_process_tx_cq(struct net_device *dev,
48905 wmb();
48906
48907 /* we want to dirty this cache line once */
48908- ACCESS_ONCE(ring->last_nr_txbb) = last_nr_txbb;
48909- ACCESS_ONCE(ring->cons) = ring_cons + txbbs_skipped;
48910+ ACCESS_ONCE_RW(ring->last_nr_txbb) = last_nr_txbb;
48911+ ACCESS_ONCE_RW(ring->cons) = ring_cons + txbbs_skipped;
48912
48913 netdev_tx_completed_queue(ring->tx_queue, packets, bytes);
48914
48915diff --git a/drivers/net/ethernet/neterion/vxge/vxge-config.c b/drivers/net/ethernet/neterion/vxge/vxge-config.c
48916index 6223930..975033d 100644
48917--- a/drivers/net/ethernet/neterion/vxge/vxge-config.c
48918+++ b/drivers/net/ethernet/neterion/vxge/vxge-config.c
48919@@ -3457,7 +3457,10 @@ __vxge_hw_fifo_create(struct __vxge_hw_vpath_handle *vp,
48920 struct __vxge_hw_fifo *fifo;
48921 struct vxge_hw_fifo_config *config;
48922 u32 txdl_size, txdl_per_memblock;
48923- struct vxge_hw_mempool_cbs fifo_mp_callback;
48924+ static struct vxge_hw_mempool_cbs fifo_mp_callback = {
48925+ .item_func_alloc = __vxge_hw_fifo_mempool_item_alloc,
48926+ };
48927+
48928 struct __vxge_hw_virtualpath *vpath;
48929
48930 if ((vp == NULL) || (attr == NULL)) {
48931@@ -3540,8 +3543,6 @@ __vxge_hw_fifo_create(struct __vxge_hw_vpath_handle *vp,
48932 goto exit;
48933 }
48934
48935- fifo_mp_callback.item_func_alloc = __vxge_hw_fifo_mempool_item_alloc;
48936-
48937 fifo->mempool =
48938 __vxge_hw_mempool_create(vpath->hldev,
48939 fifo->config->memblock_size,
48940diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
48941index 2bb48d5..d1a865d 100644
48942--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
48943+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
48944@@ -2324,7 +2324,9 @@ int qlcnic_83xx_configure_opmode(struct qlcnic_adapter *adapter)
48945 max_tx_rings = QLCNIC_MAX_VNIC_TX_RINGS;
48946 } else if (ret == QLC_83XX_DEFAULT_OPMODE) {
48947 ahw->nic_mode = QLCNIC_DEFAULT_MODE;
48948- adapter->nic_ops->init_driver = qlcnic_83xx_init_default_driver;
48949+ pax_open_kernel();
48950+ *(void **)&adapter->nic_ops->init_driver = qlcnic_83xx_init_default_driver;
48951+ pax_close_kernel();
48952 ahw->idc.state_entry = qlcnic_83xx_idc_ready_state_entry;
48953 max_sds_rings = QLCNIC_MAX_SDS_RINGS;
48954 max_tx_rings = QLCNIC_MAX_TX_RINGS;
48955diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c
48956index be7d7a6..a8983f8 100644
48957--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c
48958+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c
48959@@ -207,17 +207,23 @@ int qlcnic_83xx_config_vnic_opmode(struct qlcnic_adapter *adapter)
48960 case QLCNIC_NON_PRIV_FUNC:
48961 ahw->op_mode = QLCNIC_NON_PRIV_FUNC;
48962 ahw->idc.state_entry = qlcnic_83xx_idc_ready_state_entry;
48963- nic_ops->init_driver = qlcnic_83xx_init_non_privileged_vnic;
48964+ pax_open_kernel();
48965+ *(void **)&nic_ops->init_driver = qlcnic_83xx_init_non_privileged_vnic;
48966+ pax_close_kernel();
48967 break;
48968 case QLCNIC_PRIV_FUNC:
48969 ahw->op_mode = QLCNIC_PRIV_FUNC;
48970 ahw->idc.state_entry = qlcnic_83xx_idc_vnic_pf_entry;
48971- nic_ops->init_driver = qlcnic_83xx_init_privileged_vnic;
48972+ pax_open_kernel();
48973+ *(void **)&nic_ops->init_driver = qlcnic_83xx_init_privileged_vnic;
48974+ pax_close_kernel();
48975 break;
48976 case QLCNIC_MGMT_FUNC:
48977 ahw->op_mode = QLCNIC_MGMT_FUNC;
48978 ahw->idc.state_entry = qlcnic_83xx_idc_ready_state_entry;
48979- nic_ops->init_driver = qlcnic_83xx_init_mgmt_vnic;
48980+ pax_open_kernel();
48981+ *(void **)&nic_ops->init_driver = qlcnic_83xx_init_mgmt_vnic;
48982+ pax_close_kernel();
48983 break;
48984 default:
48985 dev_err(&adapter->pdev->dev, "Invalid Virtual NIC opmode\n");
48986diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c
48987index 332bb8a..e6adcd1 100644
48988--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c
48989+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c
48990@@ -1285,7 +1285,7 @@ flash_temp:
48991 int qlcnic_dump_fw(struct qlcnic_adapter *adapter)
48992 {
48993 struct qlcnic_fw_dump *fw_dump = &adapter->ahw->fw_dump;
48994- static const struct qlcnic_dump_operations *fw_dump_ops;
48995+ const struct qlcnic_dump_operations *fw_dump_ops;
48996 struct qlcnic_83xx_dump_template_hdr *hdr_83xx;
48997 u32 entry_offset, dump, no_entries, buf_offset = 0;
48998 int i, k, ops_cnt, ops_index, dump_size = 0;
48999diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
49000index c70ab40..00b28e0 100644
49001--- a/drivers/net/ethernet/realtek/r8169.c
49002+++ b/drivers/net/ethernet/realtek/r8169.c
49003@@ -788,22 +788,22 @@ struct rtl8169_private {
49004 struct mdio_ops {
49005 void (*write)(struct rtl8169_private *, int, int);
49006 int (*read)(struct rtl8169_private *, int);
49007- } mdio_ops;
49008+ } __no_const mdio_ops;
49009
49010 struct pll_power_ops {
49011 void (*down)(struct rtl8169_private *);
49012 void (*up)(struct rtl8169_private *);
49013- } pll_power_ops;
49014+ } __no_const pll_power_ops;
49015
49016 struct jumbo_ops {
49017 void (*enable)(struct rtl8169_private *);
49018 void (*disable)(struct rtl8169_private *);
49019- } jumbo_ops;
49020+ } __no_const jumbo_ops;
49021
49022 struct csi_ops {
49023 void (*write)(struct rtl8169_private *, int, int);
49024 u32 (*read)(struct rtl8169_private *, int);
49025- } csi_ops;
49026+ } __no_const csi_ops;
49027
49028 int (*set_speed)(struct net_device *, u8 aneg, u16 sp, u8 dpx, u32 adv);
49029 int (*get_settings)(struct net_device *, struct ethtool_cmd *);
49030diff --git a/drivers/net/ethernet/sfc/ptp.c b/drivers/net/ethernet/sfc/ptp.c
49031index 6b861e3..204ac86 100644
49032--- a/drivers/net/ethernet/sfc/ptp.c
49033+++ b/drivers/net/ethernet/sfc/ptp.c
49034@@ -822,7 +822,7 @@ static int efx_ptp_synchronize(struct efx_nic *efx, unsigned int num_readings)
49035 ptp->start.dma_addr);
49036
49037 /* Clear flag that signals MC ready */
49038- ACCESS_ONCE(*start) = 0;
49039+ ACCESS_ONCE_RW(*start) = 0;
49040 rc = efx_mcdi_rpc_start(efx, MC_CMD_PTP, synch_buf,
49041 MC_CMD_PTP_IN_SYNCHRONIZE_LEN);
49042 EFX_BUG_ON_PARANOID(rc);
49043diff --git a/drivers/net/ethernet/sfc/selftest.c b/drivers/net/ethernet/sfc/selftest.c
49044index 10b6173..b605dfd5 100644
49045--- a/drivers/net/ethernet/sfc/selftest.c
49046+++ b/drivers/net/ethernet/sfc/selftest.c
49047@@ -46,7 +46,7 @@ struct efx_loopback_payload {
49048 struct iphdr ip;
49049 struct udphdr udp;
49050 __be16 iteration;
49051- const char msg[64];
49052+ char msg[64];
49053 } __packed;
49054
49055 /* Loopback test source MAC address */
49056diff --git a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
49057index 08c483b..2c4a553 100644
49058--- a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
49059+++ b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
49060@@ -140,8 +140,8 @@ void dwmac_mmc_ctrl(void __iomem *ioaddr, unsigned int mode)
49061
49062 writel(value, ioaddr + MMC_CNTRL);
49063
49064- pr_debug("stmmac: MMC ctrl register (offset 0x%x): 0x%08x\n",
49065- MMC_CNTRL, value);
49066+// pr_debug("stmmac: MMC ctrl register (offset 0x%x): 0x%08x\n",
49067+// MMC_CNTRL, value);
49068 }
49069
49070 /* To mask all all interrupts.*/
49071diff --git a/drivers/net/ethernet/via/via-rhine.c b/drivers/net/ethernet/via/via-rhine.c
49072index 17e2766..c332f1e 100644
49073--- a/drivers/net/ethernet/via/via-rhine.c
49074+++ b/drivers/net/ethernet/via/via-rhine.c
49075@@ -2514,7 +2514,7 @@ static struct platform_driver rhine_driver_platform = {
49076 }
49077 };
49078
49079-static struct dmi_system_id rhine_dmi_table[] __initdata = {
49080+static const struct dmi_system_id rhine_dmi_table[] __initconst = {
49081 {
49082 .ident = "EPIA-M",
49083 .matches = {
49084diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h
49085index 384ca4f..dd7d4f9 100644
49086--- a/drivers/net/hyperv/hyperv_net.h
49087+++ b/drivers/net/hyperv/hyperv_net.h
49088@@ -171,7 +171,7 @@ struct rndis_device {
49089 enum rndis_device_state state;
49090 bool link_state;
49091 bool link_change;
49092- atomic_t new_req_id;
49093+ atomic_unchecked_t new_req_id;
49094
49095 spinlock_t request_lock;
49096 struct list_head req_list;
49097diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c
49098index 7816d98..7890614 100644
49099--- a/drivers/net/hyperv/rndis_filter.c
49100+++ b/drivers/net/hyperv/rndis_filter.c
49101@@ -102,7 +102,7 @@ static struct rndis_request *get_rndis_request(struct rndis_device *dev,
49102 * template
49103 */
49104 set = &rndis_msg->msg.set_req;
49105- set->req_id = atomic_inc_return(&dev->new_req_id);
49106+ set->req_id = atomic_inc_return_unchecked(&dev->new_req_id);
49107
49108 /* Add to the request list */
49109 spin_lock_irqsave(&dev->request_lock, flags);
49110@@ -918,7 +918,7 @@ static void rndis_filter_halt_device(struct rndis_device *dev)
49111
49112 /* Setup the rndis set */
49113 halt = &request->request_msg.msg.halt_req;
49114- halt->req_id = atomic_inc_return(&dev->new_req_id);
49115+ halt->req_id = atomic_inc_return_unchecked(&dev->new_req_id);
49116
49117 /* Ignore return since this msg is optional. */
49118 rndis_filter_send_request(dev, request);
49119diff --git a/drivers/net/ifb.c b/drivers/net/ifb.c
49120index 34f846b..4a0d5b1 100644
49121--- a/drivers/net/ifb.c
49122+++ b/drivers/net/ifb.c
49123@@ -253,7 +253,7 @@ static int ifb_validate(struct nlattr *tb[], struct nlattr *data[])
49124 return 0;
49125 }
49126
49127-static struct rtnl_link_ops ifb_link_ops __read_mostly = {
49128+static struct rtnl_link_ops ifb_link_ops = {
49129 .kind = "ifb",
49130 .priv_size = sizeof(struct ifb_private),
49131 .setup = ifb_setup,
49132diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
49133index 1df38bd..4bc20b0 100644
49134--- a/drivers/net/macvlan.c
49135+++ b/drivers/net/macvlan.c
49136@@ -335,7 +335,7 @@ static void macvlan_broadcast_enqueue(struct macvlan_port *port,
49137 free_nskb:
49138 kfree_skb(nskb);
49139 err:
49140- atomic_long_inc(&skb->dev->rx_dropped);
49141+ atomic_long_inc_unchecked(&skb->dev->rx_dropped);
49142 }
49143
49144 static void macvlan_flush_sources(struct macvlan_port *port,
49145@@ -1459,13 +1459,15 @@ static const struct nla_policy macvlan_policy[IFLA_MACVLAN_MAX + 1] = {
49146 int macvlan_link_register(struct rtnl_link_ops *ops)
49147 {
49148 /* common fields */
49149- ops->priv_size = sizeof(struct macvlan_dev);
49150- ops->validate = macvlan_validate;
49151- ops->maxtype = IFLA_MACVLAN_MAX;
49152- ops->policy = macvlan_policy;
49153- ops->changelink = macvlan_changelink;
49154- ops->get_size = macvlan_get_size;
49155- ops->fill_info = macvlan_fill_info;
49156+ pax_open_kernel();
49157+ *(size_t *)&ops->priv_size = sizeof(struct macvlan_dev);
49158+ *(void **)&ops->validate = macvlan_validate;
49159+ *(int *)&ops->maxtype = IFLA_MACVLAN_MAX;
49160+ *(const void **)&ops->policy = macvlan_policy;
49161+ *(void **)&ops->changelink = macvlan_changelink;
49162+ *(void **)&ops->get_size = macvlan_get_size;
49163+ *(void **)&ops->fill_info = macvlan_fill_info;
49164+ pax_close_kernel();
49165
49166 return rtnl_link_register(ops);
49167 };
49168@@ -1551,7 +1553,7 @@ static int macvlan_device_event(struct notifier_block *unused,
49169 return NOTIFY_DONE;
49170 }
49171
49172-static struct notifier_block macvlan_notifier_block __read_mostly = {
49173+static struct notifier_block macvlan_notifier_block = {
49174 .notifier_call = macvlan_device_event,
49175 };
49176
49177diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
49178index 27ecc5c..f636328 100644
49179--- a/drivers/net/macvtap.c
49180+++ b/drivers/net/macvtap.c
49181@@ -436,7 +436,7 @@ static void macvtap_setup(struct net_device *dev)
49182 dev->tx_queue_len = TUN_READQ_SIZE;
49183 }
49184
49185-static struct rtnl_link_ops macvtap_link_ops __read_mostly = {
49186+static struct rtnl_link_ops macvtap_link_ops = {
49187 .kind = "macvtap",
49188 .setup = macvtap_setup,
49189 .newlink = macvtap_newlink,
49190@@ -1033,7 +1033,7 @@ static long macvtap_ioctl(struct file *file, unsigned int cmd,
49191
49192 ret = 0;
49193 u = q->flags;
49194- if (copy_to_user(&ifr->ifr_name, vlan->dev->name, IFNAMSIZ) ||
49195+ if (copy_to_user(ifr->ifr_name, vlan->dev->name, IFNAMSIZ) ||
49196 put_user(u, &ifr->ifr_flags))
49197 ret = -EFAULT;
49198 macvtap_put_vlan(vlan);
49199@@ -1217,7 +1217,7 @@ static int macvtap_device_event(struct notifier_block *unused,
49200 return NOTIFY_DONE;
49201 }
49202
49203-static struct notifier_block macvtap_notifier_block __read_mostly = {
49204+static struct notifier_block macvtap_notifier_block = {
49205 .notifier_call = macvtap_device_event,
49206 };
49207
49208diff --git a/drivers/net/nlmon.c b/drivers/net/nlmon.c
49209index 34924df..a747360 100644
49210--- a/drivers/net/nlmon.c
49211+++ b/drivers/net/nlmon.c
49212@@ -154,7 +154,7 @@ static int nlmon_validate(struct nlattr *tb[], struct nlattr *data[])
49213 return 0;
49214 }
49215
49216-static struct rtnl_link_ops nlmon_link_ops __read_mostly = {
49217+static struct rtnl_link_ops nlmon_link_ops = {
49218 .kind = "nlmon",
49219 .priv_size = sizeof(struct nlmon),
49220 .setup = nlmon_setup,
49221diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
49222index bdfe51f..e7845c7 100644
49223--- a/drivers/net/phy/phy_device.c
49224+++ b/drivers/net/phy/phy_device.c
49225@@ -218,7 +218,7 @@ EXPORT_SYMBOL(phy_device_create);
49226 * zero on success.
49227 *
49228 */
49229-static int get_phy_c45_ids(struct mii_bus *bus, int addr, u32 *phy_id,
49230+static int get_phy_c45_ids(struct mii_bus *bus, int addr, int *phy_id,
49231 struct phy_c45_device_ids *c45_ids) {
49232 int phy_reg;
49233 int i, reg_addr;
49234@@ -288,7 +288,7 @@ static int get_phy_c45_ids(struct mii_bus *bus, int addr, u32 *phy_id,
49235 * its return value is in turn returned.
49236 *
49237 */
49238-static int get_phy_id(struct mii_bus *bus, int addr, u32 *phy_id,
49239+static int get_phy_id(struct mii_bus *bus, int addr, int *phy_id,
49240 bool is_c45, struct phy_c45_device_ids *c45_ids)
49241 {
49242 int phy_reg;
49243@@ -326,7 +326,7 @@ static int get_phy_id(struct mii_bus *bus, int addr, u32 *phy_id,
49244 struct phy_device *get_phy_device(struct mii_bus *bus, int addr, bool is_c45)
49245 {
49246 struct phy_c45_device_ids c45_ids = {0};
49247- u32 phy_id = 0;
49248+ int phy_id = 0;
49249 int r;
49250
49251 r = get_phy_id(bus, addr, &phy_id, is_c45, &c45_ids);
49252diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
49253index 9d15566..5ad4ef6 100644
49254--- a/drivers/net/ppp/ppp_generic.c
49255+++ b/drivers/net/ppp/ppp_generic.c
49256@@ -1022,7 +1022,6 @@ ppp_net_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
49257 void __user *addr = (void __user *) ifr->ifr_ifru.ifru_data;
49258 struct ppp_stats stats;
49259 struct ppp_comp_stats cstats;
49260- char *vers;
49261
49262 switch (cmd) {
49263 case SIOCGPPPSTATS:
49264@@ -1044,8 +1043,7 @@ ppp_net_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
49265 break;
49266
49267 case SIOCGPPPVER:
49268- vers = PPP_VERSION;
49269- if (copy_to_user(addr, vers, strlen(vers) + 1))
49270+ if (copy_to_user(addr, PPP_VERSION, sizeof(PPP_VERSION)))
49271 break;
49272 err = 0;
49273 break;
49274diff --git a/drivers/net/slip/slhc.c b/drivers/net/slip/slhc.c
49275index 079f7ad..b2a2bfa7 100644
49276--- a/drivers/net/slip/slhc.c
49277+++ b/drivers/net/slip/slhc.c
49278@@ -487,7 +487,7 @@ slhc_uncompress(struct slcompress *comp, unsigned char *icp, int isize)
49279 register struct tcphdr *thp;
49280 register struct iphdr *ip;
49281 register struct cstate *cs;
49282- int len, hdrlen;
49283+ long len, hdrlen;
49284 unsigned char *cp = icp;
49285
49286 /* We've got a compressed packet; read the change byte */
49287diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
49288index 7d39484..d58499d 100644
49289--- a/drivers/net/team/team.c
49290+++ b/drivers/net/team/team.c
49291@@ -2099,7 +2099,7 @@ static unsigned int team_get_num_rx_queues(void)
49292 return TEAM_DEFAULT_NUM_RX_QUEUES;
49293 }
49294
49295-static struct rtnl_link_ops team_link_ops __read_mostly = {
49296+static struct rtnl_link_ops team_link_ops = {
49297 .kind = DRV_NAME,
49298 .priv_size = sizeof(struct team),
49299 .setup = team_setup,
49300@@ -2889,7 +2889,7 @@ static int team_device_event(struct notifier_block *unused,
49301 return NOTIFY_DONE;
49302 }
49303
49304-static struct notifier_block team_notifier_block __read_mostly = {
49305+static struct notifier_block team_notifier_block = {
49306 .notifier_call = team_device_event,
49307 };
49308
49309diff --git a/drivers/net/tun.c b/drivers/net/tun.c
49310index 857dca4..642f532 100644
49311--- a/drivers/net/tun.c
49312+++ b/drivers/net/tun.c
49313@@ -1421,7 +1421,7 @@ static int tun_validate(struct nlattr *tb[], struct nlattr *data[])
49314 return -EINVAL;
49315 }
49316
49317-static struct rtnl_link_ops tun_link_ops __read_mostly = {
49318+static struct rtnl_link_ops tun_link_ops = {
49319 .kind = DRV_NAME,
49320 .priv_size = sizeof(struct tun_struct),
49321 .setup = tun_setup,
49322@@ -1830,7 +1830,7 @@ unlock:
49323 }
49324
49325 static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
49326- unsigned long arg, int ifreq_len)
49327+ unsigned long arg, size_t ifreq_len)
49328 {
49329 struct tun_file *tfile = file->private_data;
49330 struct tun_struct *tun;
49331@@ -1844,6 +1844,9 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
49332 int le;
49333 int ret;
49334
49335+ if (ifreq_len > sizeof ifr)
49336+ return -EFAULT;
49337+
49338 if (cmd == TUNSETIFF || cmd == TUNSETQUEUE || _IOC_TYPE(cmd) == 0x89) {
49339 if (copy_from_user(&ifr, argp, ifreq_len))
49340 return -EFAULT;
49341diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
49342index 778e915..58c4d95 100644
49343--- a/drivers/net/usb/hso.c
49344+++ b/drivers/net/usb/hso.c
49345@@ -70,7 +70,7 @@
49346 #include <asm/byteorder.h>
49347 #include <linux/serial_core.h>
49348 #include <linux/serial.h>
49349-
49350+#include <asm/local.h>
49351
49352 #define MOD_AUTHOR "Option Wireless"
49353 #define MOD_DESCRIPTION "USB High Speed Option driver"
49354@@ -1183,7 +1183,7 @@ static void put_rxbuf_data_and_resubmit_ctrl_urb(struct hso_serial *serial)
49355 struct urb *urb;
49356
49357 urb = serial->rx_urb[0];
49358- if (serial->port.count > 0) {
49359+ if (atomic_read(&serial->port.count) > 0) {
49360 count = put_rxbuf_data(urb, serial);
49361 if (count == -1)
49362 return;
49363@@ -1221,7 +1221,7 @@ static void hso_std_serial_read_bulk_callback(struct urb *urb)
49364 DUMP1(urb->transfer_buffer, urb->actual_length);
49365
49366 /* Anyone listening? */
49367- if (serial->port.count == 0)
49368+ if (atomic_read(&serial->port.count) == 0)
49369 return;
49370
49371 if (serial->parent->port_spec & HSO_INFO_CRC_BUG)
49372@@ -1282,8 +1282,7 @@ static int hso_serial_open(struct tty_struct *tty, struct file *filp)
49373 tty_port_tty_set(&serial->port, tty);
49374
49375 /* check for port already opened, if not set the termios */
49376- serial->port.count++;
49377- if (serial->port.count == 1) {
49378+ if (atomic_inc_return(&serial->port.count) == 1) {
49379 serial->rx_state = RX_IDLE;
49380 /* Force default termio settings */
49381 _hso_serial_set_termios(tty, NULL);
49382@@ -1293,7 +1292,7 @@ static int hso_serial_open(struct tty_struct *tty, struct file *filp)
49383 result = hso_start_serial_device(serial->parent, GFP_KERNEL);
49384 if (result) {
49385 hso_stop_serial_device(serial->parent);
49386- serial->port.count--;
49387+ atomic_dec(&serial->port.count);
49388 } else {
49389 kref_get(&serial->parent->ref);
49390 }
49391@@ -1331,10 +1330,10 @@ static void hso_serial_close(struct tty_struct *tty, struct file *filp)
49392
49393 /* reset the rts and dtr */
49394 /* do the actual close */
49395- serial->port.count--;
49396+ atomic_dec(&serial->port.count);
49397
49398- if (serial->port.count <= 0) {
49399- serial->port.count = 0;
49400+ if (atomic_read(&serial->port.count) <= 0) {
49401+ atomic_set(&serial->port.count, 0);
49402 tty_port_tty_set(&serial->port, NULL);
49403 if (!usb_gone)
49404 hso_stop_serial_device(serial->parent);
49405@@ -1417,7 +1416,7 @@ static void hso_serial_set_termios(struct tty_struct *tty, struct ktermios *old)
49406
49407 /* the actual setup */
49408 spin_lock_irqsave(&serial->serial_lock, flags);
49409- if (serial->port.count)
49410+ if (atomic_read(&serial->port.count))
49411 _hso_serial_set_termios(tty, old);
49412 else
49413 tty->termios = *old;
49414@@ -1886,7 +1885,7 @@ static void intr_callback(struct urb *urb)
49415 D1("Pending read interrupt on port %d\n", i);
49416 spin_lock(&serial->serial_lock);
49417 if (serial->rx_state == RX_IDLE &&
49418- serial->port.count > 0) {
49419+ atomic_read(&serial->port.count) > 0) {
49420 /* Setup and send a ctrl req read on
49421 * port i */
49422 if (!serial->rx_urb_filled[0]) {
49423@@ -3053,7 +3052,7 @@ static int hso_resume(struct usb_interface *iface)
49424 /* Start all serial ports */
49425 for (i = 0; i < HSO_SERIAL_TTY_MINORS; i++) {
49426 if (serial_table[i] && (serial_table[i]->interface == iface)) {
49427- if (dev2ser(serial_table[i])->port.count) {
49428+ if (atomic_read(&dev2ser(serial_table[i])->port.count)) {
49429 result =
49430 hso_start_serial_device(serial_table[i], GFP_NOIO);
49431 hso_kick_transmit(dev2ser(serial_table[i]));
49432diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
49433index 9f7c0ab..1577b4a 100644
49434--- a/drivers/net/usb/r8152.c
49435+++ b/drivers/net/usb/r8152.c
49436@@ -601,7 +601,7 @@ struct r8152 {
49437 void (*unload)(struct r8152 *);
49438 int (*eee_get)(struct r8152 *, struct ethtool_eee *);
49439 int (*eee_set)(struct r8152 *, struct ethtool_eee *);
49440- } rtl_ops;
49441+ } __no_const rtl_ops;
49442
49443 int intr_interval;
49444 u32 saved_wolopts;
49445diff --git a/drivers/net/usb/sierra_net.c b/drivers/net/usb/sierra_net.c
49446index a2515887..6d13233 100644
49447--- a/drivers/net/usb/sierra_net.c
49448+++ b/drivers/net/usb/sierra_net.c
49449@@ -51,7 +51,7 @@ static const char driver_name[] = "sierra_net";
49450 /* atomic counter partially included in MAC address to make sure 2 devices
49451 * do not end up with the same MAC - concept breaks in case of > 255 ifaces
49452 */
49453-static atomic_t iface_counter = ATOMIC_INIT(0);
49454+static atomic_unchecked_t iface_counter = ATOMIC_INIT(0);
49455
49456 /*
49457 * SYNC Timer Delay definition used to set the expiry time
49458@@ -697,7 +697,7 @@ static int sierra_net_bind(struct usbnet *dev, struct usb_interface *intf)
49459 dev->net->netdev_ops = &sierra_net_device_ops;
49460
49461 /* change MAC addr to include, ifacenum, and to be unique */
49462- dev->net->dev_addr[ETH_ALEN-2] = atomic_inc_return(&iface_counter);
49463+ dev->net->dev_addr[ETH_ALEN-2] = atomic_inc_return_unchecked(&iface_counter);
49464 dev->net->dev_addr[ETH_ALEN-1] = ifacenum;
49465
49466 /* we will have to manufacture ethernet headers, prepare template */
49467diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
49468index 777757a..395a767 100644
49469--- a/drivers/net/usb/usbnet.c
49470+++ b/drivers/net/usb/usbnet.c
49471@@ -1285,7 +1285,7 @@ netdev_tx_t usbnet_start_xmit (struct sk_buff *skb,
49472 struct net_device *net)
49473 {
49474 struct usbnet *dev = netdev_priv(net);
49475- int length;
49476+ unsigned int length;
49477 struct urb *urb = NULL;
49478 struct skb_data *entry;
49479 struct driver_info *info = dev->driver_info;
49480@@ -1413,7 +1413,7 @@ not_drop:
49481 }
49482 } else
49483 netif_dbg(dev, tx_queued, dev->net,
49484- "> tx, len %d, type 0x%x\n", length, skb->protocol);
49485+ "> tx, len %u, type 0x%x\n", length, skb->protocol);
49486 #ifdef CONFIG_PM
49487 deferred:
49488 #endif
49489diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
49490index 59b0e97..a6ed579 100644
49491--- a/drivers/net/virtio_net.c
49492+++ b/drivers/net/virtio_net.c
49493@@ -48,7 +48,7 @@ module_param(gso, bool, 0444);
49494 #define RECEIVE_AVG_WEIGHT 64
49495
49496 /* Minimum alignment for mergeable packet buffers. */
49497-#define MERGEABLE_BUFFER_ALIGN max(L1_CACHE_BYTES, 256)
49498+#define MERGEABLE_BUFFER_ALIGN max(L1_CACHE_BYTES, 256UL)
49499
49500 #define VIRTNET_DRIVER_VERSION "1.0.0"
49501
49502diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
49503index fceb637..37c70fd 100644
49504--- a/drivers/net/vxlan.c
49505+++ b/drivers/net/vxlan.c
49506@@ -2935,7 +2935,7 @@ static struct net *vxlan_get_link_net(const struct net_device *dev)
49507 return vxlan->net;
49508 }
49509
49510-static struct rtnl_link_ops vxlan_link_ops __read_mostly = {
49511+static struct rtnl_link_ops vxlan_link_ops = {
49512 .kind = "vxlan",
49513 .maxtype = IFLA_VXLAN_MAX,
49514 .policy = vxlan_policy,
49515@@ -2983,7 +2983,7 @@ static int vxlan_lowerdev_event(struct notifier_block *unused,
49516 return NOTIFY_DONE;
49517 }
49518
49519-static struct notifier_block vxlan_notifier_block __read_mostly = {
49520+static struct notifier_block vxlan_notifier_block = {
49521 .notifier_call = vxlan_lowerdev_event,
49522 };
49523
49524diff --git a/drivers/net/wan/lmc/lmc_media.c b/drivers/net/wan/lmc/lmc_media.c
49525index 5920c99..ff2e4a5 100644
49526--- a/drivers/net/wan/lmc/lmc_media.c
49527+++ b/drivers/net/wan/lmc/lmc_media.c
49528@@ -95,62 +95,63 @@ static inline void write_av9110_bit (lmc_softc_t *, int);
49529 static void write_av9110(lmc_softc_t *, u32, u32, u32, u32, u32);
49530
49531 lmc_media_t lmc_ds3_media = {
49532- lmc_ds3_init, /* special media init stuff */
49533- lmc_ds3_default, /* reset to default state */
49534- lmc_ds3_set_status, /* reset status to state provided */
49535- lmc_dummy_set_1, /* set clock source */
49536- lmc_dummy_set2_1, /* set line speed */
49537- lmc_ds3_set_100ft, /* set cable length */
49538- lmc_ds3_set_scram, /* set scrambler */
49539- lmc_ds3_get_link_status, /* get link status */
49540- lmc_dummy_set_1, /* set link status */
49541- lmc_ds3_set_crc_length, /* set CRC length */
49542- lmc_dummy_set_1, /* set T1 or E1 circuit type */
49543- lmc_ds3_watchdog
49544+ .init = lmc_ds3_init, /* special media init stuff */
49545+ .defaults = lmc_ds3_default, /* reset to default state */
49546+ .set_status = lmc_ds3_set_status, /* reset status to state provided */
49547+ .set_clock_source = lmc_dummy_set_1, /* set clock source */
49548+ .set_speed = lmc_dummy_set2_1, /* set line speed */
49549+ .set_cable_length = lmc_ds3_set_100ft, /* set cable length */
49550+ .set_scrambler = lmc_ds3_set_scram, /* set scrambler */
49551+ .get_link_status = lmc_ds3_get_link_status, /* get link status */
49552+ .set_link_status = lmc_dummy_set_1, /* set link status */
49553+ .set_crc_length = lmc_ds3_set_crc_length, /* set CRC length */
49554+ .set_circuit_type = lmc_dummy_set_1, /* set T1 or E1 circuit type */
49555+ .watchdog = lmc_ds3_watchdog
49556 };
49557
49558 lmc_media_t lmc_hssi_media = {
49559- lmc_hssi_init, /* special media init stuff */
49560- lmc_hssi_default, /* reset to default state */
49561- lmc_hssi_set_status, /* reset status to state provided */
49562- lmc_hssi_set_clock, /* set clock source */
49563- lmc_dummy_set2_1, /* set line speed */
49564- lmc_dummy_set_1, /* set cable length */
49565- lmc_dummy_set_1, /* set scrambler */
49566- lmc_hssi_get_link_status, /* get link status */
49567- lmc_hssi_set_link_status, /* set link status */
49568- lmc_hssi_set_crc_length, /* set CRC length */
49569- lmc_dummy_set_1, /* set T1 or E1 circuit type */
49570- lmc_hssi_watchdog
49571+ .init = lmc_hssi_init, /* special media init stuff */
49572+ .defaults = lmc_hssi_default, /* reset to default state */
49573+ .set_status = lmc_hssi_set_status, /* reset status to state provided */
49574+ .set_clock_source = lmc_hssi_set_clock, /* set clock source */
49575+ .set_speed = lmc_dummy_set2_1, /* set line speed */
49576+ .set_cable_length = lmc_dummy_set_1, /* set cable length */
49577+ .set_scrambler = lmc_dummy_set_1, /* set scrambler */
49578+ .get_link_status = lmc_hssi_get_link_status, /* get link status */
49579+ .set_link_status = lmc_hssi_set_link_status, /* set link status */
49580+ .set_crc_length = lmc_hssi_set_crc_length, /* set CRC length */
49581+ .set_circuit_type = lmc_dummy_set_1, /* set T1 or E1 circuit type */
49582+ .watchdog = lmc_hssi_watchdog
49583 };
49584
49585-lmc_media_t lmc_ssi_media = { lmc_ssi_init, /* special media init stuff */
49586- lmc_ssi_default, /* reset to default state */
49587- lmc_ssi_set_status, /* reset status to state provided */
49588- lmc_ssi_set_clock, /* set clock source */
49589- lmc_ssi_set_speed, /* set line speed */
49590- lmc_dummy_set_1, /* set cable length */
49591- lmc_dummy_set_1, /* set scrambler */
49592- lmc_ssi_get_link_status, /* get link status */
49593- lmc_ssi_set_link_status, /* set link status */
49594- lmc_ssi_set_crc_length, /* set CRC length */
49595- lmc_dummy_set_1, /* set T1 or E1 circuit type */
49596- lmc_ssi_watchdog
49597+lmc_media_t lmc_ssi_media = {
49598+ .init = lmc_ssi_init, /* special media init stuff */
49599+ .defaults = lmc_ssi_default, /* reset to default state */
49600+ .set_status = lmc_ssi_set_status, /* reset status to state provided */
49601+ .set_clock_source = lmc_ssi_set_clock, /* set clock source */
49602+ .set_speed = lmc_ssi_set_speed, /* set line speed */
49603+ .set_cable_length = lmc_dummy_set_1, /* set cable length */
49604+ .set_scrambler = lmc_dummy_set_1, /* set scrambler */
49605+ .get_link_status = lmc_ssi_get_link_status, /* get link status */
49606+ .set_link_status = lmc_ssi_set_link_status, /* set link status */
49607+ .set_crc_length = lmc_ssi_set_crc_length, /* set CRC length */
49608+ .set_circuit_type = lmc_dummy_set_1, /* set T1 or E1 circuit type */
49609+ .watchdog = lmc_ssi_watchdog
49610 };
49611
49612 lmc_media_t lmc_t1_media = {
49613- lmc_t1_init, /* special media init stuff */
49614- lmc_t1_default, /* reset to default state */
49615- lmc_t1_set_status, /* reset status to state provided */
49616- lmc_t1_set_clock, /* set clock source */
49617- lmc_dummy_set2_1, /* set line speed */
49618- lmc_dummy_set_1, /* set cable length */
49619- lmc_dummy_set_1, /* set scrambler */
49620- lmc_t1_get_link_status, /* get link status */
49621- lmc_dummy_set_1, /* set link status */
49622- lmc_t1_set_crc_length, /* set CRC length */
49623- lmc_t1_set_circuit_type, /* set T1 or E1 circuit type */
49624- lmc_t1_watchdog
49625+ .init = lmc_t1_init, /* special media init stuff */
49626+ .defaults = lmc_t1_default, /* reset to default state */
49627+ .set_status = lmc_t1_set_status, /* reset status to state provided */
49628+ .set_clock_source = lmc_t1_set_clock, /* set clock source */
49629+ .set_speed = lmc_dummy_set2_1, /* set line speed */
49630+ .set_cable_length = lmc_dummy_set_1, /* set cable length */
49631+ .set_scrambler = lmc_dummy_set_1, /* set scrambler */
49632+ .get_link_status = lmc_t1_get_link_status, /* get link status */
49633+ .set_link_status = lmc_dummy_set_1, /* set link status */
49634+ .set_crc_length = lmc_t1_set_crc_length, /* set CRC length */
49635+ .set_circuit_type = lmc_t1_set_circuit_type, /* set T1 or E1 circuit type */
49636+ .watchdog = lmc_t1_watchdog
49637 };
49638
49639 static void
49640diff --git a/drivers/net/wan/z85230.c b/drivers/net/wan/z85230.c
49641index feacc3b..5bac0de 100644
49642--- a/drivers/net/wan/z85230.c
49643+++ b/drivers/net/wan/z85230.c
49644@@ -485,9 +485,9 @@ static void z8530_status(struct z8530_channel *chan)
49645
49646 struct z8530_irqhandler z8530_sync =
49647 {
49648- z8530_rx,
49649- z8530_tx,
49650- z8530_status
49651+ .rx = z8530_rx,
49652+ .tx = z8530_tx,
49653+ .status = z8530_status
49654 };
49655
49656 EXPORT_SYMBOL(z8530_sync);
49657@@ -605,15 +605,15 @@ static void z8530_dma_status(struct z8530_channel *chan)
49658 }
49659
49660 static struct z8530_irqhandler z8530_dma_sync = {
49661- z8530_dma_rx,
49662- z8530_dma_tx,
49663- z8530_dma_status
49664+ .rx = z8530_dma_rx,
49665+ .tx = z8530_dma_tx,
49666+ .status = z8530_dma_status
49667 };
49668
49669 static struct z8530_irqhandler z8530_txdma_sync = {
49670- z8530_rx,
49671- z8530_dma_tx,
49672- z8530_dma_status
49673+ .rx = z8530_rx,
49674+ .tx = z8530_dma_tx,
49675+ .status = z8530_dma_status
49676 };
49677
49678 /**
49679@@ -680,9 +680,9 @@ static void z8530_status_clear(struct z8530_channel *chan)
49680
49681 struct z8530_irqhandler z8530_nop=
49682 {
49683- z8530_rx_clear,
49684- z8530_tx_clear,
49685- z8530_status_clear
49686+ .rx = z8530_rx_clear,
49687+ .tx = z8530_tx_clear,
49688+ .status = z8530_status_clear
49689 };
49690
49691
49692diff --git a/drivers/net/wimax/i2400m/rx.c b/drivers/net/wimax/i2400m/rx.c
49693index 0b60295..b8bfa5b 100644
49694--- a/drivers/net/wimax/i2400m/rx.c
49695+++ b/drivers/net/wimax/i2400m/rx.c
49696@@ -1359,7 +1359,7 @@ int i2400m_rx_setup(struct i2400m *i2400m)
49697 if (i2400m->rx_roq == NULL)
49698 goto error_roq_alloc;
49699
49700- rd = kcalloc(I2400M_RO_CIN + 1, sizeof(*i2400m->rx_roq[0].log),
49701+ rd = kcalloc(sizeof(*i2400m->rx_roq[0].log), I2400M_RO_CIN + 1,
49702 GFP_KERNEL);
49703 if (rd == NULL) {
49704 result = -ENOMEM;
49705diff --git a/drivers/net/wireless/airo.c b/drivers/net/wireless/airo.c
49706index e71a2ce..2268d61 100644
49707--- a/drivers/net/wireless/airo.c
49708+++ b/drivers/net/wireless/airo.c
49709@@ -7846,7 +7846,7 @@ static int writerids(struct net_device *dev, aironet_ioctl *comp) {
49710 struct airo_info *ai = dev->ml_priv;
49711 int ridcode;
49712 int enabled;
49713- static int (* writer)(struct airo_info *, u16 rid, const void *, int, int);
49714+ int (* writer)(struct airo_info *, u16 rid, const void *, int, int);
49715 unsigned char *iobuf;
49716
49717 /* Only super-user can write RIDs */
49718diff --git a/drivers/net/wireless/at76c50x-usb.c b/drivers/net/wireless/at76c50x-usb.c
49719index da92bfa..5a9001a 100644
49720--- a/drivers/net/wireless/at76c50x-usb.c
49721+++ b/drivers/net/wireless/at76c50x-usb.c
49722@@ -353,7 +353,7 @@ static int at76_dfu_get_state(struct usb_device *udev, u8 *state)
49723 }
49724
49725 /* Convert timeout from the DFU status to jiffies */
49726-static inline unsigned long at76_get_timeout(struct dfu_status *s)
49727+static inline unsigned long __intentional_overflow(-1) at76_get_timeout(struct dfu_status *s)
49728 {
49729 return msecs_to_jiffies((s->poll_timeout[2] << 16)
49730 | (s->poll_timeout[1] << 8)
49731diff --git a/drivers/net/wireless/ath/ath10k/htc.c b/drivers/net/wireless/ath/ath10k/htc.c
49732index 2fd9e18..3f55bdd 100644
49733--- a/drivers/net/wireless/ath/ath10k/htc.c
49734+++ b/drivers/net/wireless/ath/ath10k/htc.c
49735@@ -849,7 +849,10 @@ int ath10k_htc_start(struct ath10k_htc *htc)
49736 /* registered target arrival callback from the HIF layer */
49737 int ath10k_htc_init(struct ath10k *ar)
49738 {
49739- struct ath10k_hif_cb htc_callbacks;
49740+ static struct ath10k_hif_cb htc_callbacks = {
49741+ .rx_completion = ath10k_htc_rx_completion_handler,
49742+ .tx_completion = ath10k_htc_tx_completion_handler,
49743+ };
49744 struct ath10k_htc_ep *ep = NULL;
49745 struct ath10k_htc *htc = &ar->htc;
49746
49747@@ -858,8 +861,6 @@ int ath10k_htc_init(struct ath10k *ar)
49748 ath10k_htc_reset_endpoint_states(htc);
49749
49750 /* setup HIF layer callbacks */
49751- htc_callbacks.rx_completion = ath10k_htc_rx_completion_handler;
49752- htc_callbacks.tx_completion = ath10k_htc_tx_completion_handler;
49753 htc->ar = ar;
49754
49755 /* Get HIF default pipe for HTC message exchange */
49756diff --git a/drivers/net/wireless/ath/ath10k/htc.h b/drivers/net/wireless/ath/ath10k/htc.h
49757index 527179c..a890150 100644
49758--- a/drivers/net/wireless/ath/ath10k/htc.h
49759+++ b/drivers/net/wireless/ath/ath10k/htc.h
49760@@ -270,13 +270,13 @@ enum ath10k_htc_ep_id {
49761
49762 struct ath10k_htc_ops {
49763 void (*target_send_suspend_complete)(struct ath10k *ar);
49764-};
49765+} __no_const;
49766
49767 struct ath10k_htc_ep_ops {
49768 void (*ep_tx_complete)(struct ath10k *, struct sk_buff *);
49769 void (*ep_rx_complete)(struct ath10k *, struct sk_buff *);
49770 void (*ep_tx_credits)(struct ath10k *);
49771-};
49772+} __no_const;
49773
49774 /* service connection information */
49775 struct ath10k_htc_svc_conn_req {
49776diff --git a/drivers/net/wireless/ath/ath9k/ar9002_mac.c b/drivers/net/wireless/ath/ath9k/ar9002_mac.c
49777index f816909..e56cd8b 100644
49778--- a/drivers/net/wireless/ath/ath9k/ar9002_mac.c
49779+++ b/drivers/net/wireless/ath/ath9k/ar9002_mac.c
49780@@ -220,8 +220,8 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
49781 ads->ds_txstatus6 = ads->ds_txstatus7 = 0;
49782 ads->ds_txstatus8 = ads->ds_txstatus9 = 0;
49783
49784- ACCESS_ONCE(ads->ds_link) = i->link;
49785- ACCESS_ONCE(ads->ds_data) = i->buf_addr[0];
49786+ ACCESS_ONCE_RW(ads->ds_link) = i->link;
49787+ ACCESS_ONCE_RW(ads->ds_data) = i->buf_addr[0];
49788
49789 ctl1 = i->buf_len[0] | (i->is_last ? 0 : AR_TxMore);
49790 ctl6 = SM(i->keytype, AR_EncrType);
49791@@ -235,26 +235,26 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
49792
49793 if ((i->is_first || i->is_last) &&
49794 i->aggr != AGGR_BUF_MIDDLE && i->aggr != AGGR_BUF_LAST) {
49795- ACCESS_ONCE(ads->ds_ctl2) = set11nTries(i->rates, 0)
49796+ ACCESS_ONCE_RW(ads->ds_ctl2) = set11nTries(i->rates, 0)
49797 | set11nTries(i->rates, 1)
49798 | set11nTries(i->rates, 2)
49799 | set11nTries(i->rates, 3)
49800 | (i->dur_update ? AR_DurUpdateEna : 0)
49801 | SM(0, AR_BurstDur);
49802
49803- ACCESS_ONCE(ads->ds_ctl3) = set11nRate(i->rates, 0)
49804+ ACCESS_ONCE_RW(ads->ds_ctl3) = set11nRate(i->rates, 0)
49805 | set11nRate(i->rates, 1)
49806 | set11nRate(i->rates, 2)
49807 | set11nRate(i->rates, 3);
49808 } else {
49809- ACCESS_ONCE(ads->ds_ctl2) = 0;
49810- ACCESS_ONCE(ads->ds_ctl3) = 0;
49811+ ACCESS_ONCE_RW(ads->ds_ctl2) = 0;
49812+ ACCESS_ONCE_RW(ads->ds_ctl3) = 0;
49813 }
49814
49815 if (!i->is_first) {
49816- ACCESS_ONCE(ads->ds_ctl0) = 0;
49817- ACCESS_ONCE(ads->ds_ctl1) = ctl1;
49818- ACCESS_ONCE(ads->ds_ctl6) = ctl6;
49819+ ACCESS_ONCE_RW(ads->ds_ctl0) = 0;
49820+ ACCESS_ONCE_RW(ads->ds_ctl1) = ctl1;
49821+ ACCESS_ONCE_RW(ads->ds_ctl6) = ctl6;
49822 return;
49823 }
49824
49825@@ -279,7 +279,7 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
49826 break;
49827 }
49828
49829- ACCESS_ONCE(ads->ds_ctl0) = (i->pkt_len & AR_FrameLen)
49830+ ACCESS_ONCE_RW(ads->ds_ctl0) = (i->pkt_len & AR_FrameLen)
49831 | (i->flags & ATH9K_TXDESC_VMF ? AR_VirtMoreFrag : 0)
49832 | SM(i->txpower[0], AR_XmitPower0)
49833 | (i->flags & ATH9K_TXDESC_VEOL ? AR_VEOL : 0)
49834@@ -289,27 +289,27 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
49835 | (i->flags & ATH9K_TXDESC_RTSENA ? AR_RTSEnable :
49836 (i->flags & ATH9K_TXDESC_CTSENA ? AR_CTSEnable : 0));
49837
49838- ACCESS_ONCE(ads->ds_ctl1) = ctl1;
49839- ACCESS_ONCE(ads->ds_ctl6) = ctl6;
49840+ ACCESS_ONCE_RW(ads->ds_ctl1) = ctl1;
49841+ ACCESS_ONCE_RW(ads->ds_ctl6) = ctl6;
49842
49843 if (i->aggr == AGGR_BUF_MIDDLE || i->aggr == AGGR_BUF_LAST)
49844 return;
49845
49846- ACCESS_ONCE(ads->ds_ctl4) = set11nPktDurRTSCTS(i->rates, 0)
49847+ ACCESS_ONCE_RW(ads->ds_ctl4) = set11nPktDurRTSCTS(i->rates, 0)
49848 | set11nPktDurRTSCTS(i->rates, 1);
49849
49850- ACCESS_ONCE(ads->ds_ctl5) = set11nPktDurRTSCTS(i->rates, 2)
49851+ ACCESS_ONCE_RW(ads->ds_ctl5) = set11nPktDurRTSCTS(i->rates, 2)
49852 | set11nPktDurRTSCTS(i->rates, 3);
49853
49854- ACCESS_ONCE(ads->ds_ctl7) = set11nRateFlags(i->rates, 0)
49855+ ACCESS_ONCE_RW(ads->ds_ctl7) = set11nRateFlags(i->rates, 0)
49856 | set11nRateFlags(i->rates, 1)
49857 | set11nRateFlags(i->rates, 2)
49858 | set11nRateFlags(i->rates, 3)
49859 | SM(i->rtscts_rate, AR_RTSCTSRate);
49860
49861- ACCESS_ONCE(ads->ds_ctl9) = SM(i->txpower[1], AR_XmitPower1);
49862- ACCESS_ONCE(ads->ds_ctl10) = SM(i->txpower[2], AR_XmitPower2);
49863- ACCESS_ONCE(ads->ds_ctl11) = SM(i->txpower[3], AR_XmitPower3);
49864+ ACCESS_ONCE_RW(ads->ds_ctl9) = SM(i->txpower[1], AR_XmitPower1);
49865+ ACCESS_ONCE_RW(ads->ds_ctl10) = SM(i->txpower[2], AR_XmitPower2);
49866+ ACCESS_ONCE_RW(ads->ds_ctl11) = SM(i->txpower[3], AR_XmitPower3);
49867 }
49868
49869 static int ar9002_hw_proc_txdesc(struct ath_hw *ah, void *ds,
49870diff --git a/drivers/net/wireless/ath/ath9k/ar9003_mac.c b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
49871index da84b70..83e4978 100644
49872--- a/drivers/net/wireless/ath/ath9k/ar9003_mac.c
49873+++ b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
49874@@ -39,47 +39,47 @@ ar9003_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
49875 (i->qcu << AR_TxQcuNum_S) | desc_len;
49876
49877 checksum += val;
49878- ACCESS_ONCE(ads->info) = val;
49879+ ACCESS_ONCE_RW(ads->info) = val;
49880
49881 checksum += i->link;
49882- ACCESS_ONCE(ads->link) = i->link;
49883+ ACCESS_ONCE_RW(ads->link) = i->link;
49884
49885 checksum += i->buf_addr[0];
49886- ACCESS_ONCE(ads->data0) = i->buf_addr[0];
49887+ ACCESS_ONCE_RW(ads->data0) = i->buf_addr[0];
49888 checksum += i->buf_addr[1];
49889- ACCESS_ONCE(ads->data1) = i->buf_addr[1];
49890+ ACCESS_ONCE_RW(ads->data1) = i->buf_addr[1];
49891 checksum += i->buf_addr[2];
49892- ACCESS_ONCE(ads->data2) = i->buf_addr[2];
49893+ ACCESS_ONCE_RW(ads->data2) = i->buf_addr[2];
49894 checksum += i->buf_addr[3];
49895- ACCESS_ONCE(ads->data3) = i->buf_addr[3];
49896+ ACCESS_ONCE_RW(ads->data3) = i->buf_addr[3];
49897
49898 checksum += (val = (i->buf_len[0] << AR_BufLen_S) & AR_BufLen);
49899- ACCESS_ONCE(ads->ctl3) = val;
49900+ ACCESS_ONCE_RW(ads->ctl3) = val;
49901 checksum += (val = (i->buf_len[1] << AR_BufLen_S) & AR_BufLen);
49902- ACCESS_ONCE(ads->ctl5) = val;
49903+ ACCESS_ONCE_RW(ads->ctl5) = val;
49904 checksum += (val = (i->buf_len[2] << AR_BufLen_S) & AR_BufLen);
49905- ACCESS_ONCE(ads->ctl7) = val;
49906+ ACCESS_ONCE_RW(ads->ctl7) = val;
49907 checksum += (val = (i->buf_len[3] << AR_BufLen_S) & AR_BufLen);
49908- ACCESS_ONCE(ads->ctl9) = val;
49909+ ACCESS_ONCE_RW(ads->ctl9) = val;
49910
49911 checksum = (u16) (((checksum & 0xffff) + (checksum >> 16)) & 0xffff);
49912- ACCESS_ONCE(ads->ctl10) = checksum;
49913+ ACCESS_ONCE_RW(ads->ctl10) = checksum;
49914
49915 if (i->is_first || i->is_last) {
49916- ACCESS_ONCE(ads->ctl13) = set11nTries(i->rates, 0)
49917+ ACCESS_ONCE_RW(ads->ctl13) = set11nTries(i->rates, 0)
49918 | set11nTries(i->rates, 1)
49919 | set11nTries(i->rates, 2)
49920 | set11nTries(i->rates, 3)
49921 | (i->dur_update ? AR_DurUpdateEna : 0)
49922 | SM(0, AR_BurstDur);
49923
49924- ACCESS_ONCE(ads->ctl14) = set11nRate(i->rates, 0)
49925+ ACCESS_ONCE_RW(ads->ctl14) = set11nRate(i->rates, 0)
49926 | set11nRate(i->rates, 1)
49927 | set11nRate(i->rates, 2)
49928 | set11nRate(i->rates, 3);
49929 } else {
49930- ACCESS_ONCE(ads->ctl13) = 0;
49931- ACCESS_ONCE(ads->ctl14) = 0;
49932+ ACCESS_ONCE_RW(ads->ctl13) = 0;
49933+ ACCESS_ONCE_RW(ads->ctl14) = 0;
49934 }
49935
49936 ads->ctl20 = 0;
49937@@ -89,17 +89,17 @@ ar9003_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
49938
49939 ctl17 = SM(i->keytype, AR_EncrType);
49940 if (!i->is_first) {
49941- ACCESS_ONCE(ads->ctl11) = 0;
49942- ACCESS_ONCE(ads->ctl12) = i->is_last ? 0 : AR_TxMore;
49943- ACCESS_ONCE(ads->ctl15) = 0;
49944- ACCESS_ONCE(ads->ctl16) = 0;
49945- ACCESS_ONCE(ads->ctl17) = ctl17;
49946- ACCESS_ONCE(ads->ctl18) = 0;
49947- ACCESS_ONCE(ads->ctl19) = 0;
49948+ ACCESS_ONCE_RW(ads->ctl11) = 0;
49949+ ACCESS_ONCE_RW(ads->ctl12) = i->is_last ? 0 : AR_TxMore;
49950+ ACCESS_ONCE_RW(ads->ctl15) = 0;
49951+ ACCESS_ONCE_RW(ads->ctl16) = 0;
49952+ ACCESS_ONCE_RW(ads->ctl17) = ctl17;
49953+ ACCESS_ONCE_RW(ads->ctl18) = 0;
49954+ ACCESS_ONCE_RW(ads->ctl19) = 0;
49955 return;
49956 }
49957
49958- ACCESS_ONCE(ads->ctl11) = (i->pkt_len & AR_FrameLen)
49959+ ACCESS_ONCE_RW(ads->ctl11) = (i->pkt_len & AR_FrameLen)
49960 | (i->flags & ATH9K_TXDESC_VMF ? AR_VirtMoreFrag : 0)
49961 | SM(i->txpower[0], AR_XmitPower0)
49962 | (i->flags & ATH9K_TXDESC_VEOL ? AR_VEOL : 0)
49963@@ -135,26 +135,26 @@ ar9003_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
49964 val = (i->flags & ATH9K_TXDESC_PAPRD) >> ATH9K_TXDESC_PAPRD_S;
49965 ctl12 |= SM(val, AR_PAPRDChainMask);
49966
49967- ACCESS_ONCE(ads->ctl12) = ctl12;
49968- ACCESS_ONCE(ads->ctl17) = ctl17;
49969+ ACCESS_ONCE_RW(ads->ctl12) = ctl12;
49970+ ACCESS_ONCE_RW(ads->ctl17) = ctl17;
49971
49972- ACCESS_ONCE(ads->ctl15) = set11nPktDurRTSCTS(i->rates, 0)
49973+ ACCESS_ONCE_RW(ads->ctl15) = set11nPktDurRTSCTS(i->rates, 0)
49974 | set11nPktDurRTSCTS(i->rates, 1);
49975
49976- ACCESS_ONCE(ads->ctl16) = set11nPktDurRTSCTS(i->rates, 2)
49977+ ACCESS_ONCE_RW(ads->ctl16) = set11nPktDurRTSCTS(i->rates, 2)
49978 | set11nPktDurRTSCTS(i->rates, 3);
49979
49980- ACCESS_ONCE(ads->ctl18) = set11nRateFlags(i->rates, 0)
49981+ ACCESS_ONCE_RW(ads->ctl18) = set11nRateFlags(i->rates, 0)
49982 | set11nRateFlags(i->rates, 1)
49983 | set11nRateFlags(i->rates, 2)
49984 | set11nRateFlags(i->rates, 3)
49985 | SM(i->rtscts_rate, AR_RTSCTSRate);
49986
49987- ACCESS_ONCE(ads->ctl19) = AR_Not_Sounding;
49988+ ACCESS_ONCE_RW(ads->ctl19) = AR_Not_Sounding;
49989
49990- ACCESS_ONCE(ads->ctl20) = SM(i->txpower[1], AR_XmitPower1);
49991- ACCESS_ONCE(ads->ctl21) = SM(i->txpower[2], AR_XmitPower2);
49992- ACCESS_ONCE(ads->ctl22) = SM(i->txpower[3], AR_XmitPower3);
49993+ ACCESS_ONCE_RW(ads->ctl20) = SM(i->txpower[1], AR_XmitPower1);
49994+ ACCESS_ONCE_RW(ads->ctl21) = SM(i->txpower[2], AR_XmitPower2);
49995+ ACCESS_ONCE_RW(ads->ctl22) = SM(i->txpower[3], AR_XmitPower3);
49996 }
49997
49998 static u16 ar9003_calc_ptr_chksum(struct ar9003_txc *ads)
49999diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h
50000index e82e570..8c3cf90 100644
50001--- a/drivers/net/wireless/ath/ath9k/hw.h
50002+++ b/drivers/net/wireless/ath/ath9k/hw.h
50003@@ -646,7 +646,7 @@ struct ath_hw_private_ops {
50004
50005 /* ANI */
50006 void (*ani_cache_ini_regs)(struct ath_hw *ah);
50007-};
50008+} __no_const;
50009
50010 /**
50011 * struct ath_spec_scan - parameters for Atheros spectral scan
50012@@ -722,7 +722,7 @@ struct ath_hw_ops {
50013 #ifdef CONFIG_ATH9K_BTCOEX_SUPPORT
50014 void (*set_bt_ant_diversity)(struct ath_hw *hw, bool enable);
50015 #endif
50016-};
50017+} __no_const;
50018
50019 struct ath_nf_limits {
50020 s16 max;
50021diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
50022index 9ede991..a8f08fb 100644
50023--- a/drivers/net/wireless/ath/ath9k/main.c
50024+++ b/drivers/net/wireless/ath/ath9k/main.c
50025@@ -2537,16 +2537,18 @@ void ath9k_fill_chanctx_ops(void)
50026 if (!ath9k_is_chanctx_enabled())
50027 return;
50028
50029- ath9k_ops.hw_scan = ath9k_hw_scan;
50030- ath9k_ops.cancel_hw_scan = ath9k_cancel_hw_scan;
50031- ath9k_ops.remain_on_channel = ath9k_remain_on_channel;
50032- ath9k_ops.cancel_remain_on_channel = ath9k_cancel_remain_on_channel;
50033- ath9k_ops.add_chanctx = ath9k_add_chanctx;
50034- ath9k_ops.remove_chanctx = ath9k_remove_chanctx;
50035- ath9k_ops.change_chanctx = ath9k_change_chanctx;
50036- ath9k_ops.assign_vif_chanctx = ath9k_assign_vif_chanctx;
50037- ath9k_ops.unassign_vif_chanctx = ath9k_unassign_vif_chanctx;
50038- ath9k_ops.mgd_prepare_tx = ath9k_mgd_prepare_tx;
50039+ pax_open_kernel();
50040+ *(void **)&ath9k_ops.hw_scan = ath9k_hw_scan;
50041+ *(void **)&ath9k_ops.cancel_hw_scan = ath9k_cancel_hw_scan;
50042+ *(void **)&ath9k_ops.remain_on_channel = ath9k_remain_on_channel;
50043+ *(void **)&ath9k_ops.cancel_remain_on_channel = ath9k_cancel_remain_on_channel;
50044+ *(void **)&ath9k_ops.add_chanctx = ath9k_add_chanctx;
50045+ *(void **)&ath9k_ops.remove_chanctx = ath9k_remove_chanctx;
50046+ *(void **)&ath9k_ops.change_chanctx = ath9k_change_chanctx;
50047+ *(void **)&ath9k_ops.assign_vif_chanctx = ath9k_assign_vif_chanctx;
50048+ *(void **)&ath9k_ops.unassign_vif_chanctx = ath9k_unassign_vif_chanctx;
50049+ *(void **)&ath9k_ops.mgd_prepare_tx = ath9k_mgd_prepare_tx;
50050+ pax_close_kernel();
50051 }
50052
50053 #endif
50054diff --git a/drivers/net/wireless/b43/phy_lp.c b/drivers/net/wireless/b43/phy_lp.c
50055index 058a9f2..d5cb1ba 100644
50056--- a/drivers/net/wireless/b43/phy_lp.c
50057+++ b/drivers/net/wireless/b43/phy_lp.c
50058@@ -2502,7 +2502,7 @@ static int lpphy_b2063_tune(struct b43_wldev *dev,
50059 {
50060 struct ssb_bus *bus = dev->dev->sdev->bus;
50061
50062- static const struct b206x_channel *chandata = NULL;
50063+ const struct b206x_channel *chandata = NULL;
50064 u32 crystal_freq = bus->chipco.pmu.crystalfreq * 1000;
50065 u32 freqref, vco_freq, val1, val2, val3, timeout, timeoutref, count;
50066 u16 old_comm15, scale;
50067diff --git a/drivers/net/wireless/iwlegacy/3945-mac.c b/drivers/net/wireless/iwlegacy/3945-mac.c
50068index e566580..2c218ca 100644
50069--- a/drivers/net/wireless/iwlegacy/3945-mac.c
50070+++ b/drivers/net/wireless/iwlegacy/3945-mac.c
50071@@ -3631,7 +3631,9 @@ il3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
50072 */
50073 if (il3945_mod_params.disable_hw_scan) {
50074 D_INFO("Disabling hw_scan\n");
50075- il3945_mac_ops.hw_scan = NULL;
50076+ pax_open_kernel();
50077+ *(void **)&il3945_mac_ops.hw_scan = NULL;
50078+ pax_close_kernel();
50079 }
50080
50081 D_INFO("*** LOAD DRIVER ***\n");
50082diff --git a/drivers/net/wireless/iwlwifi/dvm/debugfs.c b/drivers/net/wireless/iwlwifi/dvm/debugfs.c
50083index 0ffb6ff..c0b7f0e 100644
50084--- a/drivers/net/wireless/iwlwifi/dvm/debugfs.c
50085+++ b/drivers/net/wireless/iwlwifi/dvm/debugfs.c
50086@@ -188,7 +188,7 @@ static ssize_t iwl_dbgfs_sram_write(struct file *file,
50087 {
50088 struct iwl_priv *priv = file->private_data;
50089 char buf[64];
50090- int buf_size;
50091+ size_t buf_size;
50092 u32 offset, len;
50093
50094 memset(buf, 0, sizeof(buf));
50095@@ -458,7 +458,7 @@ static ssize_t iwl_dbgfs_rx_handlers_write(struct file *file,
50096 struct iwl_priv *priv = file->private_data;
50097
50098 char buf[8];
50099- int buf_size;
50100+ size_t buf_size;
50101 u32 reset_flag;
50102
50103 memset(buf, 0, sizeof(buf));
50104@@ -539,7 +539,7 @@ static ssize_t iwl_dbgfs_disable_ht40_write(struct file *file,
50105 {
50106 struct iwl_priv *priv = file->private_data;
50107 char buf[8];
50108- int buf_size;
50109+ size_t buf_size;
50110 int ht40;
50111
50112 memset(buf, 0, sizeof(buf));
50113@@ -591,7 +591,7 @@ static ssize_t iwl_dbgfs_sleep_level_override_write(struct file *file,
50114 {
50115 struct iwl_priv *priv = file->private_data;
50116 char buf[8];
50117- int buf_size;
50118+ size_t buf_size;
50119 int value;
50120
50121 memset(buf, 0, sizeof(buf));
50122@@ -683,10 +683,10 @@ DEBUGFS_READ_FILE_OPS(temperature);
50123 DEBUGFS_READ_WRITE_FILE_OPS(sleep_level_override);
50124 DEBUGFS_READ_FILE_OPS(current_sleep_command);
50125
50126-static const char *fmt_value = " %-30s %10u\n";
50127-static const char *fmt_hex = " %-30s 0x%02X\n";
50128-static const char *fmt_table = " %-30s %10u %10u %10u %10u\n";
50129-static const char *fmt_header =
50130+static const char fmt_value[] = " %-30s %10u\n";
50131+static const char fmt_hex[] = " %-30s 0x%02X\n";
50132+static const char fmt_table[] = " %-30s %10u %10u %10u %10u\n";
50133+static const char fmt_header[] =
50134 "%-32s current cumulative delta max\n";
50135
50136 static int iwl_statistics_flag(struct iwl_priv *priv, char *buf, int bufsz)
50137@@ -1856,7 +1856,7 @@ static ssize_t iwl_dbgfs_clear_ucode_statistics_write(struct file *file,
50138 {
50139 struct iwl_priv *priv = file->private_data;
50140 char buf[8];
50141- int buf_size;
50142+ size_t buf_size;
50143 int clear;
50144
50145 memset(buf, 0, sizeof(buf));
50146@@ -1901,7 +1901,7 @@ static ssize_t iwl_dbgfs_ucode_tracing_write(struct file *file,
50147 {
50148 struct iwl_priv *priv = file->private_data;
50149 char buf[8];
50150- int buf_size;
50151+ size_t buf_size;
50152 int trace;
50153
50154 memset(buf, 0, sizeof(buf));
50155@@ -1972,7 +1972,7 @@ static ssize_t iwl_dbgfs_missed_beacon_write(struct file *file,
50156 {
50157 struct iwl_priv *priv = file->private_data;
50158 char buf[8];
50159- int buf_size;
50160+ size_t buf_size;
50161 int missed;
50162
50163 memset(buf, 0, sizeof(buf));
50164@@ -2013,7 +2013,7 @@ static ssize_t iwl_dbgfs_plcp_delta_write(struct file *file,
50165
50166 struct iwl_priv *priv = file->private_data;
50167 char buf[8];
50168- int buf_size;
50169+ size_t buf_size;
50170 int plcp;
50171
50172 memset(buf, 0, sizeof(buf));
50173@@ -2073,7 +2073,7 @@ static ssize_t iwl_dbgfs_txfifo_flush_write(struct file *file,
50174
50175 struct iwl_priv *priv = file->private_data;
50176 char buf[8];
50177- int buf_size;
50178+ size_t buf_size;
50179 int flush;
50180
50181 memset(buf, 0, sizeof(buf));
50182@@ -2163,7 +2163,7 @@ static ssize_t iwl_dbgfs_protection_mode_write(struct file *file,
50183
50184 struct iwl_priv *priv = file->private_data;
50185 char buf[8];
50186- int buf_size;
50187+ size_t buf_size;
50188 int rts;
50189
50190 if (!priv->cfg->ht_params)
50191@@ -2204,7 +2204,7 @@ static ssize_t iwl_dbgfs_echo_test_write(struct file *file,
50192 {
50193 struct iwl_priv *priv = file->private_data;
50194 char buf[8];
50195- int buf_size;
50196+ size_t buf_size;
50197
50198 memset(buf, 0, sizeof(buf));
50199 buf_size = min(count, sizeof(buf) - 1);
50200@@ -2238,7 +2238,7 @@ static ssize_t iwl_dbgfs_log_event_write(struct file *file,
50201 struct iwl_priv *priv = file->private_data;
50202 u32 event_log_flag;
50203 char buf[8];
50204- int buf_size;
50205+ size_t buf_size;
50206
50207 /* check that the interface is up */
50208 if (!iwl_is_ready(priv))
50209@@ -2292,7 +2292,7 @@ static ssize_t iwl_dbgfs_calib_disabled_write(struct file *file,
50210 struct iwl_priv *priv = file->private_data;
50211 char buf[8];
50212 u32 calib_disabled;
50213- int buf_size;
50214+ size_t buf_size;
50215
50216 memset(buf, 0, sizeof(buf));
50217 buf_size = min(count, sizeof(buf) - 1);
50218diff --git a/drivers/net/wireless/iwlwifi/pcie/trans.c b/drivers/net/wireless/iwlwifi/pcie/trans.c
50219index 69935aa..c1ca128 100644
50220--- a/drivers/net/wireless/iwlwifi/pcie/trans.c
50221+++ b/drivers/net/wireless/iwlwifi/pcie/trans.c
50222@@ -1836,7 +1836,7 @@ static ssize_t iwl_dbgfs_interrupt_write(struct file *file,
50223 struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
50224
50225 char buf[8];
50226- int buf_size;
50227+ size_t buf_size;
50228 u32 reset_flag;
50229
50230 memset(buf, 0, sizeof(buf));
50231@@ -1857,7 +1857,7 @@ static ssize_t iwl_dbgfs_csr_write(struct file *file,
50232 {
50233 struct iwl_trans *trans = file->private_data;
50234 char buf[8];
50235- int buf_size;
50236+ size_t buf_size;
50237 int csr;
50238
50239 memset(buf, 0, sizeof(buf));
50240diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
50241index 8908be6..fe97ddd 100644
50242--- a/drivers/net/wireless/mac80211_hwsim.c
50243+++ b/drivers/net/wireless/mac80211_hwsim.c
50244@@ -3070,20 +3070,20 @@ static int __init init_mac80211_hwsim(void)
50245 if (channels < 1)
50246 return -EINVAL;
50247
50248- mac80211_hwsim_mchan_ops = mac80211_hwsim_ops;
50249- mac80211_hwsim_mchan_ops.hw_scan = mac80211_hwsim_hw_scan;
50250- mac80211_hwsim_mchan_ops.cancel_hw_scan = mac80211_hwsim_cancel_hw_scan;
50251- mac80211_hwsim_mchan_ops.sw_scan_start = NULL;
50252- mac80211_hwsim_mchan_ops.sw_scan_complete = NULL;
50253- mac80211_hwsim_mchan_ops.remain_on_channel = mac80211_hwsim_roc;
50254- mac80211_hwsim_mchan_ops.cancel_remain_on_channel = mac80211_hwsim_croc;
50255- mac80211_hwsim_mchan_ops.add_chanctx = mac80211_hwsim_add_chanctx;
50256- mac80211_hwsim_mchan_ops.remove_chanctx = mac80211_hwsim_remove_chanctx;
50257- mac80211_hwsim_mchan_ops.change_chanctx = mac80211_hwsim_change_chanctx;
50258- mac80211_hwsim_mchan_ops.assign_vif_chanctx =
50259- mac80211_hwsim_assign_vif_chanctx;
50260- mac80211_hwsim_mchan_ops.unassign_vif_chanctx =
50261- mac80211_hwsim_unassign_vif_chanctx;
50262+ pax_open_kernel();
50263+ memcpy((void *)&mac80211_hwsim_mchan_ops, &mac80211_hwsim_ops, sizeof mac80211_hwsim_mchan_ops);
50264+ *(void **)&mac80211_hwsim_mchan_ops.hw_scan = mac80211_hwsim_hw_scan;
50265+ *(void **)&mac80211_hwsim_mchan_ops.cancel_hw_scan = mac80211_hwsim_cancel_hw_scan;
50266+ *(void **)&mac80211_hwsim_mchan_ops.sw_scan_start = NULL;
50267+ *(void **)&mac80211_hwsim_mchan_ops.sw_scan_complete = NULL;
50268+ *(void **)&mac80211_hwsim_mchan_ops.remain_on_channel = mac80211_hwsim_roc;
50269+ *(void **)&mac80211_hwsim_mchan_ops.cancel_remain_on_channel = mac80211_hwsim_croc;
50270+ *(void **)&mac80211_hwsim_mchan_ops.add_chanctx = mac80211_hwsim_add_chanctx;
50271+ *(void **)&mac80211_hwsim_mchan_ops.remove_chanctx = mac80211_hwsim_remove_chanctx;
50272+ *(void **)&mac80211_hwsim_mchan_ops.change_chanctx = mac80211_hwsim_change_chanctx;
50273+ *(void **)&mac80211_hwsim_mchan_ops.assign_vif_chanctx = mac80211_hwsim_assign_vif_chanctx;
50274+ *(void **)&mac80211_hwsim_mchan_ops.unassign_vif_chanctx = mac80211_hwsim_unassign_vif_chanctx;
50275+ pax_close_kernel();
50276
50277 spin_lock_init(&hwsim_radio_lock);
50278 INIT_LIST_HEAD(&hwsim_radios);
50279diff --git a/drivers/net/wireless/rndis_wlan.c b/drivers/net/wireless/rndis_wlan.c
50280index 60d44ce..884dd1c 100644
50281--- a/drivers/net/wireless/rndis_wlan.c
50282+++ b/drivers/net/wireless/rndis_wlan.c
50283@@ -1236,7 +1236,7 @@ static int set_rts_threshold(struct usbnet *usbdev, u32 rts_threshold)
50284
50285 netdev_dbg(usbdev->net, "%s(): %i\n", __func__, rts_threshold);
50286
50287- if (rts_threshold < 0 || rts_threshold > 2347)
50288+ if (rts_threshold > 2347)
50289 rts_threshold = 2347;
50290
50291 tmp = cpu_to_le32(rts_threshold);
50292diff --git a/drivers/net/wireless/rt2x00/rt2x00.h b/drivers/net/wireless/rt2x00/rt2x00.h
50293index 9bb398b..b0cc047 100644
50294--- a/drivers/net/wireless/rt2x00/rt2x00.h
50295+++ b/drivers/net/wireless/rt2x00/rt2x00.h
50296@@ -375,7 +375,7 @@ struct rt2x00_intf {
50297 * for hardware which doesn't support hardware
50298 * sequence counting.
50299 */
50300- atomic_t seqno;
50301+ atomic_unchecked_t seqno;
50302 };
50303
50304 static inline struct rt2x00_intf* vif_to_intf(struct ieee80211_vif *vif)
50305diff --git a/drivers/net/wireless/rt2x00/rt2x00queue.c b/drivers/net/wireless/rt2x00/rt2x00queue.c
50306index 68b620b..92ecd9e 100644
50307--- a/drivers/net/wireless/rt2x00/rt2x00queue.c
50308+++ b/drivers/net/wireless/rt2x00/rt2x00queue.c
50309@@ -224,9 +224,9 @@ static void rt2x00queue_create_tx_descriptor_seq(struct rt2x00_dev *rt2x00dev,
50310 * sequence counter given by mac80211.
50311 */
50312 if (test_bit(ENTRY_TXD_FIRST_FRAGMENT, &txdesc->flags))
50313- seqno = atomic_add_return(0x10, &intf->seqno);
50314+ seqno = atomic_add_return_unchecked(0x10, &intf->seqno);
50315 else
50316- seqno = atomic_read(&intf->seqno);
50317+ seqno = atomic_read_unchecked(&intf->seqno);
50318
50319 hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
50320 hdr->seq_ctrl |= cpu_to_le16(seqno);
50321diff --git a/drivers/net/wireless/ti/wl1251/sdio.c b/drivers/net/wireless/ti/wl1251/sdio.c
50322index b661f896..ddf7d2b 100644
50323--- a/drivers/net/wireless/ti/wl1251/sdio.c
50324+++ b/drivers/net/wireless/ti/wl1251/sdio.c
50325@@ -282,13 +282,17 @@ static int wl1251_sdio_probe(struct sdio_func *func,
50326
50327 irq_set_irq_type(wl->irq, IRQ_TYPE_EDGE_RISING);
50328
50329- wl1251_sdio_ops.enable_irq = wl1251_enable_line_irq;
50330- wl1251_sdio_ops.disable_irq = wl1251_disable_line_irq;
50331+ pax_open_kernel();
50332+ *(void **)&wl1251_sdio_ops.enable_irq = wl1251_enable_line_irq;
50333+ *(void **)&wl1251_sdio_ops.disable_irq = wl1251_disable_line_irq;
50334+ pax_close_kernel();
50335
50336 wl1251_info("using dedicated interrupt line");
50337 } else {
50338- wl1251_sdio_ops.enable_irq = wl1251_sdio_enable_irq;
50339- wl1251_sdio_ops.disable_irq = wl1251_sdio_disable_irq;
50340+ pax_open_kernel();
50341+ *(void **)&wl1251_sdio_ops.enable_irq = wl1251_sdio_enable_irq;
50342+ *(void **)&wl1251_sdio_ops.disable_irq = wl1251_sdio_disable_irq;
50343+ pax_close_kernel();
50344
50345 wl1251_info("using SDIO interrupt");
50346 }
50347diff --git a/drivers/net/wireless/ti/wl12xx/main.c b/drivers/net/wireless/ti/wl12xx/main.c
50348index 144d1f8..7030936 100644
50349--- a/drivers/net/wireless/ti/wl12xx/main.c
50350+++ b/drivers/net/wireless/ti/wl12xx/main.c
50351@@ -657,7 +657,9 @@ static int wl12xx_identify_chip(struct wl1271 *wl)
50352 sizeof(wl->conf.mem));
50353
50354 /* read data preparation is only needed by wl127x */
50355- wl->ops->prepare_read = wl127x_prepare_read;
50356+ pax_open_kernel();
50357+ *(void **)&wl->ops->prepare_read = wl127x_prepare_read;
50358+ pax_close_kernel();
50359
50360 wlcore_set_min_fw_ver(wl, WL127X_CHIP_VER,
50361 WL127X_IFTYPE_SR_VER, WL127X_MAJOR_SR_VER,
50362@@ -682,7 +684,9 @@ static int wl12xx_identify_chip(struct wl1271 *wl)
50363 sizeof(wl->conf.mem));
50364
50365 /* read data preparation is only needed by wl127x */
50366- wl->ops->prepare_read = wl127x_prepare_read;
50367+ pax_open_kernel();
50368+ *(void **)&wl->ops->prepare_read = wl127x_prepare_read;
50369+ pax_close_kernel();
50370
50371 wlcore_set_min_fw_ver(wl, WL127X_CHIP_VER,
50372 WL127X_IFTYPE_SR_VER, WL127X_MAJOR_SR_VER,
50373diff --git a/drivers/net/wireless/ti/wl18xx/main.c b/drivers/net/wireless/ti/wl18xx/main.c
50374index 717c4f5..a813aeb 100644
50375--- a/drivers/net/wireless/ti/wl18xx/main.c
50376+++ b/drivers/net/wireless/ti/wl18xx/main.c
50377@@ -1923,8 +1923,10 @@ static int wl18xx_setup(struct wl1271 *wl)
50378 }
50379
50380 if (!checksum_param) {
50381- wl18xx_ops.set_rx_csum = NULL;
50382- wl18xx_ops.init_vif = NULL;
50383+ pax_open_kernel();
50384+ *(void **)&wl18xx_ops.set_rx_csum = NULL;
50385+ *(void **)&wl18xx_ops.init_vif = NULL;
50386+ pax_close_kernel();
50387 }
50388
50389 /* Enable 11a Band only if we have 5G antennas */
50390diff --git a/drivers/net/wireless/zd1211rw/zd_usb.c b/drivers/net/wireless/zd1211rw/zd_usb.c
50391index a912dc0..a8225ba 100644
50392--- a/drivers/net/wireless/zd1211rw/zd_usb.c
50393+++ b/drivers/net/wireless/zd1211rw/zd_usb.c
50394@@ -385,7 +385,7 @@ static inline void handle_regs_int(struct urb *urb)
50395 {
50396 struct zd_usb *usb = urb->context;
50397 struct zd_usb_interrupt *intr = &usb->intr;
50398- int len;
50399+ unsigned int len;
50400 u16 int_num;
50401
50402 ZD_ASSERT(in_interrupt());
50403diff --git a/drivers/nfc/nfcwilink.c b/drivers/nfc/nfcwilink.c
50404index ce2e2cf..f81e500 100644
50405--- a/drivers/nfc/nfcwilink.c
50406+++ b/drivers/nfc/nfcwilink.c
50407@@ -497,7 +497,7 @@ static struct nci_ops nfcwilink_ops = {
50408
50409 static int nfcwilink_probe(struct platform_device *pdev)
50410 {
50411- static struct nfcwilink *drv;
50412+ struct nfcwilink *drv;
50413 int rc;
50414 __u32 protocols;
50415
50416diff --git a/drivers/nfc/st21nfca/st21nfca.c b/drivers/nfc/st21nfca/st21nfca.c
50417index 24d3d24..ff70d28 100644
50418--- a/drivers/nfc/st21nfca/st21nfca.c
50419+++ b/drivers/nfc/st21nfca/st21nfca.c
50420@@ -588,7 +588,7 @@ static int st21nfca_get_iso14443_3_uid(struct nfc_hci_dev *hdev, u8 *gate,
50421 goto exit;
50422 }
50423
50424- gate = uid_skb->data;
50425+ memcpy(gate, uid_skb->data, uid_skb->len);
50426 *len = uid_skb->len;
50427 exit:
50428 kfree_skb(uid_skb);
50429diff --git a/drivers/of/fdt.c b/drivers/of/fdt.c
50430index 3a896c9..ac7b1c8 100644
50431--- a/drivers/of/fdt.c
50432+++ b/drivers/of/fdt.c
50433@@ -1118,7 +1118,9 @@ static int __init of_fdt_raw_init(void)
50434 pr_warn("fdt: not creating '/sys/firmware/fdt': CRC check failed\n");
50435 return 0;
50436 }
50437- of_fdt_raw_attr.size = fdt_totalsize(initial_boot_params);
50438+ pax_open_kernel();
50439+ *(size_t *)&of_fdt_raw_attr.size = fdt_totalsize(initial_boot_params);
50440+ pax_close_kernel();
50441 return sysfs_create_bin_file(firmware_kobj, &of_fdt_raw_attr);
50442 }
50443 late_initcall(of_fdt_raw_init);
50444diff --git a/drivers/oprofile/buffer_sync.c b/drivers/oprofile/buffer_sync.c
50445index d93b2b6..ae50401 100644
50446--- a/drivers/oprofile/buffer_sync.c
50447+++ b/drivers/oprofile/buffer_sync.c
50448@@ -332,7 +332,7 @@ static void add_data(struct op_entry *entry, struct mm_struct *mm)
50449 if (cookie == NO_COOKIE)
50450 offset = pc;
50451 if (cookie == INVALID_COOKIE) {
50452- atomic_inc(&oprofile_stats.sample_lost_no_mapping);
50453+ atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
50454 offset = pc;
50455 }
50456 if (cookie != last_cookie) {
50457@@ -376,14 +376,14 @@ add_sample(struct mm_struct *mm, struct op_sample *s, int in_kernel)
50458 /* add userspace sample */
50459
50460 if (!mm) {
50461- atomic_inc(&oprofile_stats.sample_lost_no_mm);
50462+ atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mm);
50463 return 0;
50464 }
50465
50466 cookie = lookup_dcookie(mm, s->eip, &offset);
50467
50468 if (cookie == INVALID_COOKIE) {
50469- atomic_inc(&oprofile_stats.sample_lost_no_mapping);
50470+ atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
50471 return 0;
50472 }
50473
50474@@ -552,7 +552,7 @@ void sync_buffer(int cpu)
50475 /* ignore backtraces if failed to add a sample */
50476 if (state == sb_bt_start) {
50477 state = sb_bt_ignore;
50478- atomic_inc(&oprofile_stats.bt_lost_no_mapping);
50479+ atomic_inc_unchecked(&oprofile_stats.bt_lost_no_mapping);
50480 }
50481 }
50482 release_mm(mm);
50483diff --git a/drivers/oprofile/event_buffer.c b/drivers/oprofile/event_buffer.c
50484index c0cc4e7..44d4e54 100644
50485--- a/drivers/oprofile/event_buffer.c
50486+++ b/drivers/oprofile/event_buffer.c
50487@@ -53,7 +53,7 @@ void add_event_entry(unsigned long value)
50488 }
50489
50490 if (buffer_pos == buffer_size) {
50491- atomic_inc(&oprofile_stats.event_lost_overflow);
50492+ atomic_inc_unchecked(&oprofile_stats.event_lost_overflow);
50493 return;
50494 }
50495
50496diff --git a/drivers/oprofile/oprof.c b/drivers/oprofile/oprof.c
50497index ed2c3ec..deda85a 100644
50498--- a/drivers/oprofile/oprof.c
50499+++ b/drivers/oprofile/oprof.c
50500@@ -110,7 +110,7 @@ static void switch_worker(struct work_struct *work)
50501 if (oprofile_ops.switch_events())
50502 return;
50503
50504- atomic_inc(&oprofile_stats.multiplex_counter);
50505+ atomic_inc_unchecked(&oprofile_stats.multiplex_counter);
50506 start_switch_worker();
50507 }
50508
50509diff --git a/drivers/oprofile/oprofile_files.c b/drivers/oprofile/oprofile_files.c
50510index ee2cfce..7f8f699 100644
50511--- a/drivers/oprofile/oprofile_files.c
50512+++ b/drivers/oprofile/oprofile_files.c
50513@@ -27,7 +27,7 @@ unsigned long oprofile_time_slice;
50514
50515 #ifdef CONFIG_OPROFILE_EVENT_MULTIPLEX
50516
50517-static ssize_t timeout_read(struct file *file, char __user *buf,
50518+static ssize_t __intentional_overflow(-1) timeout_read(struct file *file, char __user *buf,
50519 size_t count, loff_t *offset)
50520 {
50521 return oprofilefs_ulong_to_user(jiffies_to_msecs(oprofile_time_slice),
50522diff --git a/drivers/oprofile/oprofile_stats.c b/drivers/oprofile/oprofile_stats.c
50523index 59659ce..6c860a0 100644
50524--- a/drivers/oprofile/oprofile_stats.c
50525+++ b/drivers/oprofile/oprofile_stats.c
50526@@ -30,11 +30,11 @@ void oprofile_reset_stats(void)
50527 cpu_buf->sample_invalid_eip = 0;
50528 }
50529
50530- atomic_set(&oprofile_stats.sample_lost_no_mm, 0);
50531- atomic_set(&oprofile_stats.sample_lost_no_mapping, 0);
50532- atomic_set(&oprofile_stats.event_lost_overflow, 0);
50533- atomic_set(&oprofile_stats.bt_lost_no_mapping, 0);
50534- atomic_set(&oprofile_stats.multiplex_counter, 0);
50535+ atomic_set_unchecked(&oprofile_stats.sample_lost_no_mm, 0);
50536+ atomic_set_unchecked(&oprofile_stats.sample_lost_no_mapping, 0);
50537+ atomic_set_unchecked(&oprofile_stats.event_lost_overflow, 0);
50538+ atomic_set_unchecked(&oprofile_stats.bt_lost_no_mapping, 0);
50539+ atomic_set_unchecked(&oprofile_stats.multiplex_counter, 0);
50540 }
50541
50542
50543diff --git a/drivers/oprofile/oprofile_stats.h b/drivers/oprofile/oprofile_stats.h
50544index 1fc622b..8c48fc3 100644
50545--- a/drivers/oprofile/oprofile_stats.h
50546+++ b/drivers/oprofile/oprofile_stats.h
50547@@ -13,11 +13,11 @@
50548 #include <linux/atomic.h>
50549
50550 struct oprofile_stat_struct {
50551- atomic_t sample_lost_no_mm;
50552- atomic_t sample_lost_no_mapping;
50553- atomic_t bt_lost_no_mapping;
50554- atomic_t event_lost_overflow;
50555- atomic_t multiplex_counter;
50556+ atomic_unchecked_t sample_lost_no_mm;
50557+ atomic_unchecked_t sample_lost_no_mapping;
50558+ atomic_unchecked_t bt_lost_no_mapping;
50559+ atomic_unchecked_t event_lost_overflow;
50560+ atomic_unchecked_t multiplex_counter;
50561 };
50562
50563 extern struct oprofile_stat_struct oprofile_stats;
50564diff --git a/drivers/oprofile/oprofilefs.c b/drivers/oprofile/oprofilefs.c
50565index 3f49345..c750d0b 100644
50566--- a/drivers/oprofile/oprofilefs.c
50567+++ b/drivers/oprofile/oprofilefs.c
50568@@ -176,8 +176,8 @@ int oprofilefs_create_ro_ulong(struct dentry *root,
50569
50570 static ssize_t atomic_read_file(struct file *file, char __user *buf, size_t count, loff_t *offset)
50571 {
50572- atomic_t *val = file->private_data;
50573- return oprofilefs_ulong_to_user(atomic_read(val), buf, count, offset);
50574+ atomic_unchecked_t *val = file->private_data;
50575+ return oprofilefs_ulong_to_user(atomic_read_unchecked(val), buf, count, offset);
50576 }
50577
50578
50579@@ -189,7 +189,7 @@ static const struct file_operations atomic_ro_fops = {
50580
50581
50582 int oprofilefs_create_ro_atomic(struct dentry *root,
50583- char const *name, atomic_t *val)
50584+ char const *name, atomic_unchecked_t *val)
50585 {
50586 return __oprofilefs_create_file(root, name,
50587 &atomic_ro_fops, 0444, val);
50588diff --git a/drivers/oprofile/timer_int.c b/drivers/oprofile/timer_int.c
50589index bdef916..88c7dee 100644
50590--- a/drivers/oprofile/timer_int.c
50591+++ b/drivers/oprofile/timer_int.c
50592@@ -93,7 +93,7 @@ static int oprofile_cpu_notify(struct notifier_block *self,
50593 return NOTIFY_OK;
50594 }
50595
50596-static struct notifier_block __refdata oprofile_cpu_notifier = {
50597+static struct notifier_block oprofile_cpu_notifier = {
50598 .notifier_call = oprofile_cpu_notify,
50599 };
50600
50601diff --git a/drivers/parport/procfs.c b/drivers/parport/procfs.c
50602index 3b47080..6cd05dd 100644
50603--- a/drivers/parport/procfs.c
50604+++ b/drivers/parport/procfs.c
50605@@ -64,7 +64,7 @@ static int do_active_device(struct ctl_table *table, int write,
50606
50607 *ppos += len;
50608
50609- return copy_to_user(result, buffer, len) ? -EFAULT : 0;
50610+ return (len > sizeof buffer || copy_to_user(result, buffer, len)) ? -EFAULT : 0;
50611 }
50612
50613 #ifdef CONFIG_PARPORT_1284
50614@@ -106,7 +106,7 @@ static int do_autoprobe(struct ctl_table *table, int write,
50615
50616 *ppos += len;
50617
50618- return copy_to_user (result, buffer, len) ? -EFAULT : 0;
50619+ return (len > sizeof buffer || copy_to_user (result, buffer, len)) ? -EFAULT : 0;
50620 }
50621 #endif /* IEEE1284.3 support. */
50622
50623diff --git a/drivers/pci/host/pci-host-generic.c b/drivers/pci/host/pci-host-generic.c
50624index ba46e58..90cfc24 100644
50625--- a/drivers/pci/host/pci-host-generic.c
50626+++ b/drivers/pci/host/pci-host-generic.c
50627@@ -26,9 +26,9 @@
50628 #include <linux/platform_device.h>
50629
50630 struct gen_pci_cfg_bus_ops {
50631+ struct pci_ops ops;
50632 u32 bus_shift;
50633- void __iomem *(*map_bus)(struct pci_bus *, unsigned int, int);
50634-};
50635+} __do_const;
50636
50637 struct gen_pci_cfg_windows {
50638 struct resource res;
50639@@ -56,8 +56,12 @@ static void __iomem *gen_pci_map_cfg_bus_cam(struct pci_bus *bus,
50640 }
50641
50642 static struct gen_pci_cfg_bus_ops gen_pci_cfg_cam_bus_ops = {
50643+ .ops = {
50644+ .map_bus = gen_pci_map_cfg_bus_cam,
50645+ .read = pci_generic_config_read,
50646+ .write = pci_generic_config_write,
50647+ },
50648 .bus_shift = 16,
50649- .map_bus = gen_pci_map_cfg_bus_cam,
50650 };
50651
50652 static void __iomem *gen_pci_map_cfg_bus_ecam(struct pci_bus *bus,
50653@@ -72,13 +76,12 @@ static void __iomem *gen_pci_map_cfg_bus_ecam(struct pci_bus *bus,
50654 }
50655
50656 static struct gen_pci_cfg_bus_ops gen_pci_cfg_ecam_bus_ops = {
50657+ .ops = {
50658+ .map_bus = gen_pci_map_cfg_bus_ecam,
50659+ .read = pci_generic_config_read,
50660+ .write = pci_generic_config_write,
50661+ },
50662 .bus_shift = 20,
50663- .map_bus = gen_pci_map_cfg_bus_ecam,
50664-};
50665-
50666-static struct pci_ops gen_pci_ops = {
50667- .read = pci_generic_config_read,
50668- .write = pci_generic_config_write,
50669 };
50670
50671 static const struct of_device_id gen_pci_of_match[] = {
50672@@ -219,7 +222,6 @@ static int gen_pci_probe(struct platform_device *pdev)
50673 .private_data = (void **)&pci,
50674 .setup = gen_pci_setup,
50675 .map_irq = of_irq_parse_and_map_pci,
50676- .ops = &gen_pci_ops,
50677 };
50678
50679 if (!pci)
50680@@ -241,7 +243,7 @@ static int gen_pci_probe(struct platform_device *pdev)
50681
50682 of_id = of_match_node(gen_pci_of_match, np);
50683 pci->cfg.ops = of_id->data;
50684- gen_pci_ops.map_bus = pci->cfg.ops->map_bus;
50685+ hw.ops = &pci->cfg.ops->ops;
50686 pci->host.dev.parent = dev;
50687 INIT_LIST_HEAD(&pci->host.windows);
50688 INIT_LIST_HEAD(&pci->resources);
50689diff --git a/drivers/pci/hotplug/acpiphp_ibm.c b/drivers/pci/hotplug/acpiphp_ibm.c
50690index 6ca2399..68d866b 100644
50691--- a/drivers/pci/hotplug/acpiphp_ibm.c
50692+++ b/drivers/pci/hotplug/acpiphp_ibm.c
50693@@ -452,7 +452,9 @@ static int __init ibm_acpiphp_init(void)
50694 goto init_cleanup;
50695 }
50696
50697- ibm_apci_table_attr.size = ibm_get_table_from_acpi(NULL);
50698+ pax_open_kernel();
50699+ *(size_t *)&ibm_apci_table_attr.size = ibm_get_table_from_acpi(NULL);
50700+ pax_close_kernel();
50701 retval = sysfs_create_bin_file(sysdir, &ibm_apci_table_attr);
50702
50703 return retval;
50704diff --git a/drivers/pci/hotplug/cpcihp_generic.c b/drivers/pci/hotplug/cpcihp_generic.c
50705index 66b7bbe..26bee78 100644
50706--- a/drivers/pci/hotplug/cpcihp_generic.c
50707+++ b/drivers/pci/hotplug/cpcihp_generic.c
50708@@ -73,7 +73,6 @@ static u16 port;
50709 static unsigned int enum_bit;
50710 static u8 enum_mask;
50711
50712-static struct cpci_hp_controller_ops generic_hpc_ops;
50713 static struct cpci_hp_controller generic_hpc;
50714
50715 static int __init validate_parameters(void)
50716@@ -139,6 +138,10 @@ static int query_enum(void)
50717 return ((value & enum_mask) == enum_mask);
50718 }
50719
50720+static struct cpci_hp_controller_ops generic_hpc_ops = {
50721+ .query_enum = query_enum,
50722+};
50723+
50724 static int __init cpcihp_generic_init(void)
50725 {
50726 int status;
50727@@ -165,7 +168,6 @@ static int __init cpcihp_generic_init(void)
50728 pci_dev_put(dev);
50729
50730 memset(&generic_hpc, 0, sizeof (struct cpci_hp_controller));
50731- generic_hpc_ops.query_enum = query_enum;
50732 generic_hpc.ops = &generic_hpc_ops;
50733
50734 status = cpci_hp_register_controller(&generic_hpc);
50735diff --git a/drivers/pci/hotplug/cpcihp_zt5550.c b/drivers/pci/hotplug/cpcihp_zt5550.c
50736index 7ecf34e..effed62 100644
50737--- a/drivers/pci/hotplug/cpcihp_zt5550.c
50738+++ b/drivers/pci/hotplug/cpcihp_zt5550.c
50739@@ -59,7 +59,6 @@
50740 /* local variables */
50741 static bool debug;
50742 static bool poll;
50743-static struct cpci_hp_controller_ops zt5550_hpc_ops;
50744 static struct cpci_hp_controller zt5550_hpc;
50745
50746 /* Primary cPCI bus bridge device */
50747@@ -204,6 +203,10 @@ static int zt5550_hc_disable_irq(void)
50748 return 0;
50749 }
50750
50751+static struct cpci_hp_controller_ops zt5550_hpc_ops = {
50752+ .query_enum = zt5550_hc_query_enum,
50753+};
50754+
50755 static int zt5550_hc_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
50756 {
50757 int status;
50758@@ -215,16 +218,17 @@ static int zt5550_hc_init_one (struct pci_dev *pdev, const struct pci_device_id
50759 dbg("returned from zt5550_hc_config");
50760
50761 memset(&zt5550_hpc, 0, sizeof (struct cpci_hp_controller));
50762- zt5550_hpc_ops.query_enum = zt5550_hc_query_enum;
50763 zt5550_hpc.ops = &zt5550_hpc_ops;
50764 if (!poll) {
50765 zt5550_hpc.irq = hc_dev->irq;
50766 zt5550_hpc.irq_flags = IRQF_SHARED;
50767 zt5550_hpc.dev_id = hc_dev;
50768
50769- zt5550_hpc_ops.enable_irq = zt5550_hc_enable_irq;
50770- zt5550_hpc_ops.disable_irq = zt5550_hc_disable_irq;
50771- zt5550_hpc_ops.check_irq = zt5550_hc_check_irq;
50772+ pax_open_kernel();
50773+ *(void **)&zt5550_hpc_ops.enable_irq = zt5550_hc_enable_irq;
50774+ *(void **)&zt5550_hpc_ops.disable_irq = zt5550_hc_disable_irq;
50775+ *(void **)&zt5550_hpc_ops.check_irq = zt5550_hc_check_irq;
50776+ pax_open_kernel();
50777 } else {
50778 info("using ENUM# polling mode");
50779 }
50780diff --git a/drivers/pci/hotplug/cpqphp_nvram.c b/drivers/pci/hotplug/cpqphp_nvram.c
50781index 1e08ff8c..3cd145f 100644
50782--- a/drivers/pci/hotplug/cpqphp_nvram.c
50783+++ b/drivers/pci/hotplug/cpqphp_nvram.c
50784@@ -425,8 +425,10 @@ static u32 store_HRT (void __iomem *rom_start)
50785
50786 void compaq_nvram_init (void __iomem *rom_start)
50787 {
50788+#ifndef CONFIG_PAX_KERNEXEC
50789 if (rom_start)
50790 compaq_int15_entry_point = (rom_start + ROM_INT15_PHY_ADDR - ROM_PHY_ADDR);
50791+#endif
50792
50793 dbg("int15 entry = %p\n", compaq_int15_entry_point);
50794
50795diff --git a/drivers/pci/hotplug/pci_hotplug_core.c b/drivers/pci/hotplug/pci_hotplug_core.c
50796index 56d8486..f26113f 100644
50797--- a/drivers/pci/hotplug/pci_hotplug_core.c
50798+++ b/drivers/pci/hotplug/pci_hotplug_core.c
50799@@ -436,8 +436,10 @@ int __pci_hp_register(struct hotplug_slot *slot, struct pci_bus *bus,
50800 return -EINVAL;
50801 }
50802
50803- slot->ops->owner = owner;
50804- slot->ops->mod_name = mod_name;
50805+ pax_open_kernel();
50806+ *(struct module **)&slot->ops->owner = owner;
50807+ *(const char **)&slot->ops->mod_name = mod_name;
50808+ pax_close_kernel();
50809
50810 mutex_lock(&pci_hp_mutex);
50811 /*
50812diff --git a/drivers/pci/hotplug/pciehp_core.c b/drivers/pci/hotplug/pciehp_core.c
50813index 07aa722..84514b4 100644
50814--- a/drivers/pci/hotplug/pciehp_core.c
50815+++ b/drivers/pci/hotplug/pciehp_core.c
50816@@ -92,7 +92,7 @@ static int init_slot(struct controller *ctrl)
50817 struct slot *slot = ctrl->slot;
50818 struct hotplug_slot *hotplug = NULL;
50819 struct hotplug_slot_info *info = NULL;
50820- struct hotplug_slot_ops *ops = NULL;
50821+ hotplug_slot_ops_no_const *ops = NULL;
50822 char name[SLOT_NAME_SIZE];
50823 int retval = -ENOMEM;
50824
50825diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c
50826index c3e7dfc..cbd9625 100644
50827--- a/drivers/pci/msi.c
50828+++ b/drivers/pci/msi.c
50829@@ -513,8 +513,8 @@ static int populate_msi_sysfs(struct pci_dev *pdev)
50830 {
50831 struct attribute **msi_attrs;
50832 struct attribute *msi_attr;
50833- struct device_attribute *msi_dev_attr;
50834- struct attribute_group *msi_irq_group;
50835+ device_attribute_no_const *msi_dev_attr;
50836+ attribute_group_no_const *msi_irq_group;
50837 const struct attribute_group **msi_irq_groups;
50838 struct msi_desc *entry;
50839 int ret = -ENOMEM;
50840@@ -573,7 +573,7 @@ error_attrs:
50841 count = 0;
50842 msi_attr = msi_attrs[count];
50843 while (msi_attr) {
50844- msi_dev_attr = container_of(msi_attr, struct device_attribute, attr);
50845+ msi_dev_attr = container_of(msi_attr, device_attribute_no_const, attr);
50846 kfree(msi_attr->name);
50847 kfree(msi_dev_attr);
50848 ++count;
50849diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c
50850index 312f23a..d21181c 100644
50851--- a/drivers/pci/pci-sysfs.c
50852+++ b/drivers/pci/pci-sysfs.c
50853@@ -1140,7 +1140,7 @@ static int pci_create_attr(struct pci_dev *pdev, int num, int write_combine)
50854 {
50855 /* allocate attribute structure, piggyback attribute name */
50856 int name_len = write_combine ? 13 : 10;
50857- struct bin_attribute *res_attr;
50858+ bin_attribute_no_const *res_attr;
50859 int retval;
50860
50861 res_attr = kzalloc(sizeof(*res_attr) + name_len, GFP_ATOMIC);
50862@@ -1317,7 +1317,7 @@ static struct device_attribute reset_attr = __ATTR(reset, 0200, NULL, reset_stor
50863 static int pci_create_capabilities_sysfs(struct pci_dev *dev)
50864 {
50865 int retval;
50866- struct bin_attribute *attr;
50867+ bin_attribute_no_const *attr;
50868
50869 /* If the device has VPD, try to expose it in sysfs. */
50870 if (dev->vpd) {
50871@@ -1364,7 +1364,7 @@ int __must_check pci_create_sysfs_dev_files(struct pci_dev *pdev)
50872 {
50873 int retval;
50874 int rom_size = 0;
50875- struct bin_attribute *attr;
50876+ bin_attribute_no_const *attr;
50877
50878 if (!sysfs_initialized)
50879 return -EACCES;
50880diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
50881index 4091f82..7d98eef 100644
50882--- a/drivers/pci/pci.h
50883+++ b/drivers/pci/pci.h
50884@@ -99,7 +99,7 @@ struct pci_vpd_ops {
50885 struct pci_vpd {
50886 unsigned int len;
50887 const struct pci_vpd_ops *ops;
50888- struct bin_attribute *attr; /* descriptor for sysfs VPD entry */
50889+ bin_attribute_no_const *attr; /* descriptor for sysfs VPD entry */
50890 };
50891
50892 int pci_vpd_pci22_init(struct pci_dev *dev);
50893diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
50894index 820740a..8b1c673 100644
50895--- a/drivers/pci/pcie/aspm.c
50896+++ b/drivers/pci/pcie/aspm.c
50897@@ -27,9 +27,9 @@
50898 #define MODULE_PARAM_PREFIX "pcie_aspm."
50899
50900 /* Note: those are not register definitions */
50901-#define ASPM_STATE_L0S_UP (1) /* Upstream direction L0s state */
50902-#define ASPM_STATE_L0S_DW (2) /* Downstream direction L0s state */
50903-#define ASPM_STATE_L1 (4) /* L1 state */
50904+#define ASPM_STATE_L0S_UP (1U) /* Upstream direction L0s state */
50905+#define ASPM_STATE_L0S_DW (2U) /* Downstream direction L0s state */
50906+#define ASPM_STATE_L1 (4U) /* L1 state */
50907 #define ASPM_STATE_L0S (ASPM_STATE_L0S_UP | ASPM_STATE_L0S_DW)
50908 #define ASPM_STATE_ALL (ASPM_STATE_L0S | ASPM_STATE_L1)
50909
50910diff --git a/drivers/pci/pcie/portdrv_pci.c b/drivers/pci/pcie/portdrv_pci.c
50911index be35da2..ec16cdb 100644
50912--- a/drivers/pci/pcie/portdrv_pci.c
50913+++ b/drivers/pci/pcie/portdrv_pci.c
50914@@ -324,7 +324,7 @@ static int __init dmi_pcie_pme_disable_msi(const struct dmi_system_id *d)
50915 return 0;
50916 }
50917
50918-static struct dmi_system_id __initdata pcie_portdrv_dmi_table[] = {
50919+static const struct dmi_system_id __initconst pcie_portdrv_dmi_table[] = {
50920 /*
50921 * Boxes that should not use MSI for PCIe PME signaling.
50922 */
50923diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
50924index 8d2f400..c97cc91 100644
50925--- a/drivers/pci/probe.c
50926+++ b/drivers/pci/probe.c
50927@@ -175,7 +175,7 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
50928 u16 orig_cmd;
50929 struct pci_bus_region region, inverted_region;
50930
50931- mask = type ? PCI_ROM_ADDRESS_MASK : ~0;
50932+ mask = type ? (u32)PCI_ROM_ADDRESS_MASK : ~0;
50933
50934 /* No printks while decoding is disabled! */
50935 if (!dev->mmio_always_on) {
50936diff --git a/drivers/pci/proc.c b/drivers/pci/proc.c
50937index 3f155e7..0f4b1f0 100644
50938--- a/drivers/pci/proc.c
50939+++ b/drivers/pci/proc.c
50940@@ -434,7 +434,16 @@ static const struct file_operations proc_bus_pci_dev_operations = {
50941 static int __init pci_proc_init(void)
50942 {
50943 struct pci_dev *dev = NULL;
50944+
50945+#ifdef CONFIG_GRKERNSEC_PROC_ADD
50946+#ifdef CONFIG_GRKERNSEC_PROC_USER
50947+ proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR, NULL);
50948+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
50949+ proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
50950+#endif
50951+#else
50952 proc_bus_pci_dir = proc_mkdir("bus/pci", NULL);
50953+#endif
50954 proc_create("devices", 0, proc_bus_pci_dir,
50955 &proc_bus_pci_dev_operations);
50956 proc_initialized = 1;
50957diff --git a/drivers/platform/chrome/chromeos_laptop.c b/drivers/platform/chrome/chromeos_laptop.c
50958index b84fdd6..b89d829 100644
50959--- a/drivers/platform/chrome/chromeos_laptop.c
50960+++ b/drivers/platform/chrome/chromeos_laptop.c
50961@@ -479,7 +479,7 @@ static struct chromeos_laptop cr48 = {
50962 .callback = chromeos_laptop_dmi_matched, \
50963 .driver_data = (void *)&board_
50964
50965-static struct dmi_system_id chromeos_laptop_dmi_table[] __initdata = {
50966+static struct dmi_system_id chromeos_laptop_dmi_table[] __initconst = {
50967 {
50968 .ident = "Samsung Series 5 550",
50969 .matches = {
50970diff --git a/drivers/platform/chrome/chromeos_pstore.c b/drivers/platform/chrome/chromeos_pstore.c
50971index 3474920..acc9581 100644
50972--- a/drivers/platform/chrome/chromeos_pstore.c
50973+++ b/drivers/platform/chrome/chromeos_pstore.c
50974@@ -13,7 +13,7 @@
50975 #include <linux/platform_device.h>
50976 #include <linux/pstore_ram.h>
50977
50978-static struct dmi_system_id chromeos_pstore_dmi_table[] __initdata = {
50979+static const struct dmi_system_id chromeos_pstore_dmi_table[] __initconst = {
50980 {
50981 /*
50982 * Today all Chromebooks/boxes ship with Google_* as version and
50983diff --git a/drivers/platform/x86/alienware-wmi.c b/drivers/platform/x86/alienware-wmi.c
50984index 1e1e594..8fe59c5 100644
50985--- a/drivers/platform/x86/alienware-wmi.c
50986+++ b/drivers/platform/x86/alienware-wmi.c
50987@@ -150,7 +150,7 @@ struct wmax_led_args {
50988 } __packed;
50989
50990 static struct platform_device *platform_device;
50991-static struct device_attribute *zone_dev_attrs;
50992+static device_attribute_no_const *zone_dev_attrs;
50993 static struct attribute **zone_attrs;
50994 static struct platform_zone *zone_data;
50995
50996@@ -160,7 +160,7 @@ static struct platform_driver platform_driver = {
50997 }
50998 };
50999
51000-static struct attribute_group zone_attribute_group = {
51001+static attribute_group_no_const zone_attribute_group = {
51002 .name = "rgb_zones",
51003 };
51004
51005diff --git a/drivers/platform/x86/asus-wmi.c b/drivers/platform/x86/asus-wmi.c
51006index 7543a56..367ca8ed 100644
51007--- a/drivers/platform/x86/asus-wmi.c
51008+++ b/drivers/platform/x86/asus-wmi.c
51009@@ -1589,6 +1589,10 @@ static int show_dsts(struct seq_file *m, void *data)
51010 int err;
51011 u32 retval = -1;
51012
51013+#ifdef CONFIG_GRKERNSEC_KMEM
51014+ return -EPERM;
51015+#endif
51016+
51017 err = asus_wmi_get_devstate(asus, asus->debug.dev_id, &retval);
51018
51019 if (err < 0)
51020@@ -1605,6 +1609,10 @@ static int show_devs(struct seq_file *m, void *data)
51021 int err;
51022 u32 retval = -1;
51023
51024+#ifdef CONFIG_GRKERNSEC_KMEM
51025+ return -EPERM;
51026+#endif
51027+
51028 err = asus_wmi_set_devstate(asus->debug.dev_id, asus->debug.ctrl_param,
51029 &retval);
51030
51031@@ -1629,6 +1637,10 @@ static int show_call(struct seq_file *m, void *data)
51032 union acpi_object *obj;
51033 acpi_status status;
51034
51035+#ifdef CONFIG_GRKERNSEC_KMEM
51036+ return -EPERM;
51037+#endif
51038+
51039 status = wmi_evaluate_method(ASUS_WMI_MGMT_GUID,
51040 1, asus->debug.method_id,
51041 &input, &output);
51042diff --git a/drivers/platform/x86/compal-laptop.c b/drivers/platform/x86/compal-laptop.c
51043index bceb30b..bf063d4 100644
51044--- a/drivers/platform/x86/compal-laptop.c
51045+++ b/drivers/platform/x86/compal-laptop.c
51046@@ -766,7 +766,7 @@ static int dmi_check_cb_extra(const struct dmi_system_id *id)
51047 return 1;
51048 }
51049
51050-static struct dmi_system_id __initdata compal_dmi_table[] = {
51051+static const struct dmi_system_id __initconst compal_dmi_table[] = {
51052 {
51053 .ident = "FL90/IFL90",
51054 .matches = {
51055diff --git a/drivers/platform/x86/hdaps.c b/drivers/platform/x86/hdaps.c
51056index 458e6c9..089aee7 100644
51057--- a/drivers/platform/x86/hdaps.c
51058+++ b/drivers/platform/x86/hdaps.c
51059@@ -514,7 +514,7 @@ static int __init hdaps_dmi_match_invert(const struct dmi_system_id *id)
51060 "ThinkPad T42p", so the order of the entries matters.
51061 If your ThinkPad is not recognized, please update to latest
51062 BIOS. This is especially the case for some R52 ThinkPads. */
51063-static struct dmi_system_id __initdata hdaps_whitelist[] = {
51064+static const struct dmi_system_id __initconst hdaps_whitelist[] = {
51065 HDAPS_DMI_MATCH_INVERT("IBM", "ThinkPad R50p", HDAPS_BOTH_AXES),
51066 HDAPS_DMI_MATCH_NORMAL("IBM", "ThinkPad R50"),
51067 HDAPS_DMI_MATCH_NORMAL("IBM", "ThinkPad R51"),
51068diff --git a/drivers/platform/x86/ibm_rtl.c b/drivers/platform/x86/ibm_rtl.c
51069index 97c2be1..2ee50ce 100644
51070--- a/drivers/platform/x86/ibm_rtl.c
51071+++ b/drivers/platform/x86/ibm_rtl.c
51072@@ -227,7 +227,7 @@ static void rtl_teardown_sysfs(void) {
51073 }
51074
51075
51076-static struct dmi_system_id __initdata ibm_rtl_dmi_table[] = {
51077+static const struct dmi_system_id __initconst ibm_rtl_dmi_table[] = {
51078 { \
51079 .matches = { \
51080 DMI_MATCH(DMI_SYS_VENDOR, "IBM"), \
51081diff --git a/drivers/platform/x86/intel_oaktrail.c b/drivers/platform/x86/intel_oaktrail.c
51082index a4a4258..a58a04c 100644
51083--- a/drivers/platform/x86/intel_oaktrail.c
51084+++ b/drivers/platform/x86/intel_oaktrail.c
51085@@ -298,7 +298,7 @@ static int dmi_check_cb(const struct dmi_system_id *id)
51086 return 0;
51087 }
51088
51089-static struct dmi_system_id __initdata oaktrail_dmi_table[] = {
51090+static const struct dmi_system_id __initconst oaktrail_dmi_table[] = {
51091 {
51092 .ident = "OakTrail platform",
51093 .matches = {
51094diff --git a/drivers/platform/x86/msi-laptop.c b/drivers/platform/x86/msi-laptop.c
51095index 0859877..59d596d 100644
51096--- a/drivers/platform/x86/msi-laptop.c
51097+++ b/drivers/platform/x86/msi-laptop.c
51098@@ -604,7 +604,7 @@ static int dmi_check_cb(const struct dmi_system_id *dmi)
51099 return 1;
51100 }
51101
51102-static struct dmi_system_id __initdata msi_dmi_table[] = {
51103+static const struct dmi_system_id __initconst msi_dmi_table[] = {
51104 {
51105 .ident = "MSI S270",
51106 .matches = {
51107@@ -999,12 +999,14 @@ static int __init load_scm_model_init(struct platform_device *sdev)
51108
51109 if (!quirks->ec_read_only) {
51110 /* allow userland write sysfs file */
51111- dev_attr_bluetooth.store = store_bluetooth;
51112- dev_attr_wlan.store = store_wlan;
51113- dev_attr_threeg.store = store_threeg;
51114- dev_attr_bluetooth.attr.mode |= S_IWUSR;
51115- dev_attr_wlan.attr.mode |= S_IWUSR;
51116- dev_attr_threeg.attr.mode |= S_IWUSR;
51117+ pax_open_kernel();
51118+ *(void **)&dev_attr_bluetooth.store = store_bluetooth;
51119+ *(void **)&dev_attr_wlan.store = store_wlan;
51120+ *(void **)&dev_attr_threeg.store = store_threeg;
51121+ *(umode_t *)&dev_attr_bluetooth.attr.mode |= S_IWUSR;
51122+ *(umode_t *)&dev_attr_wlan.attr.mode |= S_IWUSR;
51123+ *(umode_t *)&dev_attr_threeg.attr.mode |= S_IWUSR;
51124+ pax_close_kernel();
51125 }
51126
51127 /* disable hardware control by fn key */
51128diff --git a/drivers/platform/x86/msi-wmi.c b/drivers/platform/x86/msi-wmi.c
51129index 6d2bac0..ec2b029 100644
51130--- a/drivers/platform/x86/msi-wmi.c
51131+++ b/drivers/platform/x86/msi-wmi.c
51132@@ -183,7 +183,7 @@ static const struct backlight_ops msi_backlight_ops = {
51133 static void msi_wmi_notify(u32 value, void *context)
51134 {
51135 struct acpi_buffer response = { ACPI_ALLOCATE_BUFFER, NULL };
51136- static struct key_entry *key;
51137+ struct key_entry *key;
51138 union acpi_object *obj;
51139 acpi_status status;
51140
51141diff --git a/drivers/platform/x86/samsung-laptop.c b/drivers/platform/x86/samsung-laptop.c
51142index 9e701b2..c68a7b5 100644
51143--- a/drivers/platform/x86/samsung-laptop.c
51144+++ b/drivers/platform/x86/samsung-laptop.c
51145@@ -1567,7 +1567,7 @@ static int __init samsung_dmi_matched(const struct dmi_system_id *d)
51146 return 0;
51147 }
51148
51149-static struct dmi_system_id __initdata samsung_dmi_table[] = {
51150+static const struct dmi_system_id __initconst samsung_dmi_table[] = {
51151 {
51152 .matches = {
51153 DMI_MATCH(DMI_SYS_VENDOR,
51154diff --git a/drivers/platform/x86/samsung-q10.c b/drivers/platform/x86/samsung-q10.c
51155index e6aac72..e11ff24 100644
51156--- a/drivers/platform/x86/samsung-q10.c
51157+++ b/drivers/platform/x86/samsung-q10.c
51158@@ -95,7 +95,7 @@ static int __init dmi_check_callback(const struct dmi_system_id *id)
51159 return 1;
51160 }
51161
51162-static struct dmi_system_id __initdata samsungq10_dmi_table[] = {
51163+static const struct dmi_system_id __initconst samsungq10_dmi_table[] = {
51164 {
51165 .ident = "Samsung Q10",
51166 .matches = {
51167diff --git a/drivers/platform/x86/sony-laptop.c b/drivers/platform/x86/sony-laptop.c
51168index e51c1e7..71bb385 100644
51169--- a/drivers/platform/x86/sony-laptop.c
51170+++ b/drivers/platform/x86/sony-laptop.c
51171@@ -2526,7 +2526,7 @@ static void sony_nc_gfx_switch_cleanup(struct platform_device *pd)
51172 }
51173
51174 /* High speed charging function */
51175-static struct device_attribute *hsc_handle;
51176+static device_attribute_no_const *hsc_handle;
51177
51178 static ssize_t sony_nc_highspeed_charging_store(struct device *dev,
51179 struct device_attribute *attr,
51180@@ -2600,7 +2600,7 @@ static void sony_nc_highspeed_charging_cleanup(struct platform_device *pd)
51181 }
51182
51183 /* low battery function */
51184-static struct device_attribute *lowbatt_handle;
51185+static device_attribute_no_const *lowbatt_handle;
51186
51187 static ssize_t sony_nc_lowbatt_store(struct device *dev,
51188 struct device_attribute *attr,
51189@@ -2666,7 +2666,7 @@ static void sony_nc_lowbatt_cleanup(struct platform_device *pd)
51190 }
51191
51192 /* fan speed function */
51193-static struct device_attribute *fan_handle, *hsf_handle;
51194+static device_attribute_no_const *fan_handle, *hsf_handle;
51195
51196 static ssize_t sony_nc_hsfan_store(struct device *dev,
51197 struct device_attribute *attr,
51198@@ -2773,7 +2773,7 @@ static void sony_nc_fanspeed_cleanup(struct platform_device *pd)
51199 }
51200
51201 /* USB charge function */
51202-static struct device_attribute *uc_handle;
51203+static device_attribute_no_const *uc_handle;
51204
51205 static ssize_t sony_nc_usb_charge_store(struct device *dev,
51206 struct device_attribute *attr,
51207@@ -2847,7 +2847,7 @@ static void sony_nc_usb_charge_cleanup(struct platform_device *pd)
51208 }
51209
51210 /* Panel ID function */
51211-static struct device_attribute *panel_handle;
51212+static device_attribute_no_const *panel_handle;
51213
51214 static ssize_t sony_nc_panelid_show(struct device *dev,
51215 struct device_attribute *attr, char *buffer)
51216@@ -2894,7 +2894,7 @@ static void sony_nc_panelid_cleanup(struct platform_device *pd)
51217 }
51218
51219 /* smart connect function */
51220-static struct device_attribute *sc_handle;
51221+static device_attribute_no_const *sc_handle;
51222
51223 static ssize_t sony_nc_smart_conn_store(struct device *dev,
51224 struct device_attribute *attr,
51225@@ -4854,7 +4854,7 @@ static struct acpi_driver sony_pic_driver = {
51226 .drv.pm = &sony_pic_pm,
51227 };
51228
51229-static struct dmi_system_id __initdata sonypi_dmi_table[] = {
51230+static const struct dmi_system_id __initconst sonypi_dmi_table[] = {
51231 {
51232 .ident = "Sony Vaio",
51233 .matches = {
51234diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
51235index 3b8ceee..e18652c 100644
51236--- a/drivers/platform/x86/thinkpad_acpi.c
51237+++ b/drivers/platform/x86/thinkpad_acpi.c
51238@@ -2093,7 +2093,7 @@ static int hotkey_mask_get(void)
51239 return 0;
51240 }
51241
51242-void static hotkey_mask_warn_incomplete_mask(void)
51243+static void hotkey_mask_warn_incomplete_mask(void)
51244 {
51245 /* log only what the user can fix... */
51246 const u32 wantedmask = hotkey_driver_mask &
51247@@ -2437,10 +2437,10 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
51248 && !tp_features.bright_unkfw)
51249 TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNHOME);
51250 }
51251+}
51252
51253 #undef TPACPI_COMPARE_KEY
51254 #undef TPACPI_MAY_SEND_KEY
51255-}
51256
51257 /*
51258 * Polling driver
51259diff --git a/drivers/pnp/pnpbios/bioscalls.c b/drivers/pnp/pnpbios/bioscalls.c
51260index 438d4c7..ca8a2fb 100644
51261--- a/drivers/pnp/pnpbios/bioscalls.c
51262+++ b/drivers/pnp/pnpbios/bioscalls.c
51263@@ -59,7 +59,7 @@ do { \
51264 set_desc_limit(&gdt[(selname) >> 3], (size) - 1); \
51265 } while(0)
51266
51267-static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
51268+static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
51269 (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
51270
51271 /*
51272@@ -96,7 +96,10 @@ static inline u16 call_pnp_bios(u16 func, u16 arg1, u16 arg2, u16 arg3,
51273
51274 cpu = get_cpu();
51275 save_desc_40 = get_cpu_gdt_table(cpu)[0x40 / 8];
51276+
51277+ pax_open_kernel();
51278 get_cpu_gdt_table(cpu)[0x40 / 8] = bad_bios_desc;
51279+ pax_close_kernel();
51280
51281 /* On some boxes IRQ's during PnP BIOS calls are deadly. */
51282 spin_lock_irqsave(&pnp_bios_lock, flags);
51283@@ -134,7 +137,10 @@ static inline u16 call_pnp_bios(u16 func, u16 arg1, u16 arg2, u16 arg3,
51284 :"memory");
51285 spin_unlock_irqrestore(&pnp_bios_lock, flags);
51286
51287+ pax_open_kernel();
51288 get_cpu_gdt_table(cpu)[0x40 / 8] = save_desc_40;
51289+ pax_close_kernel();
51290+
51291 put_cpu();
51292
51293 /* If we get here and this is set then the PnP BIOS faulted on us. */
51294@@ -468,7 +474,7 @@ int pnp_bios_read_escd(char *data, u32 nvram_base)
51295 return status;
51296 }
51297
51298-void pnpbios_calls_init(union pnp_bios_install_struct *header)
51299+void __init pnpbios_calls_init(union pnp_bios_install_struct *header)
51300 {
51301 int i;
51302
51303@@ -476,6 +482,8 @@ void pnpbios_calls_init(union pnp_bios_install_struct *header)
51304 pnp_bios_callpoint.offset = header->fields.pm16offset;
51305 pnp_bios_callpoint.segment = PNP_CS16;
51306
51307+ pax_open_kernel();
51308+
51309 for_each_possible_cpu(i) {
51310 struct desc_struct *gdt = get_cpu_gdt_table(i);
51311 if (!gdt)
51312@@ -487,4 +495,6 @@ void pnpbios_calls_init(union pnp_bios_install_struct *header)
51313 set_desc_base(&gdt[GDT_ENTRY_PNPBIOS_DS],
51314 (unsigned long)__va(header->fields.pm16dseg));
51315 }
51316+
51317+ pax_close_kernel();
51318 }
51319diff --git a/drivers/pnp/pnpbios/core.c b/drivers/pnp/pnpbios/core.c
51320index facd43b..b291260 100644
51321--- a/drivers/pnp/pnpbios/core.c
51322+++ b/drivers/pnp/pnpbios/core.c
51323@@ -494,7 +494,7 @@ static int __init exploding_pnp_bios(const struct dmi_system_id *d)
51324 return 0;
51325 }
51326
51327-static struct dmi_system_id pnpbios_dmi_table[] __initdata = {
51328+static const struct dmi_system_id pnpbios_dmi_table[] __initconst = {
51329 { /* PnPBIOS GPF on boot */
51330 .callback = exploding_pnp_bios,
51331 .ident = "Higraded P14H",
51332diff --git a/drivers/power/pda_power.c b/drivers/power/pda_power.c
51333index 0c52e2a..3421ab7 100644
51334--- a/drivers/power/pda_power.c
51335+++ b/drivers/power/pda_power.c
51336@@ -37,7 +37,11 @@ static int polling;
51337
51338 #if IS_ENABLED(CONFIG_USB_PHY)
51339 static struct usb_phy *transceiver;
51340-static struct notifier_block otg_nb;
51341+static int otg_handle_notification(struct notifier_block *nb,
51342+ unsigned long event, void *unused);
51343+static struct notifier_block otg_nb = {
51344+ .notifier_call = otg_handle_notification
51345+};
51346 #endif
51347
51348 static struct regulator *ac_draw;
51349@@ -369,7 +373,6 @@ static int pda_power_probe(struct platform_device *pdev)
51350
51351 #if IS_ENABLED(CONFIG_USB_PHY)
51352 if (!IS_ERR_OR_NULL(transceiver) && pdata->use_otg_notifier) {
51353- otg_nb.notifier_call = otg_handle_notification;
51354 ret = usb_register_notifier(transceiver, &otg_nb);
51355 if (ret) {
51356 dev_err(dev, "failure to register otg notifier\n");
51357diff --git a/drivers/power/power_supply.h b/drivers/power/power_supply.h
51358index cc439fd..8fa30df 100644
51359--- a/drivers/power/power_supply.h
51360+++ b/drivers/power/power_supply.h
51361@@ -16,12 +16,12 @@ struct power_supply;
51362
51363 #ifdef CONFIG_SYSFS
51364
51365-extern void power_supply_init_attrs(struct device_type *dev_type);
51366+extern void power_supply_init_attrs(void);
51367 extern int power_supply_uevent(struct device *dev, struct kobj_uevent_env *env);
51368
51369 #else
51370
51371-static inline void power_supply_init_attrs(struct device_type *dev_type) {}
51372+static inline void power_supply_init_attrs(void) {}
51373 #define power_supply_uevent NULL
51374
51375 #endif /* CONFIG_SYSFS */
51376diff --git a/drivers/power/power_supply_core.c b/drivers/power/power_supply_core.c
51377index 694e8cd..9f03483 100644
51378--- a/drivers/power/power_supply_core.c
51379+++ b/drivers/power/power_supply_core.c
51380@@ -28,7 +28,10 @@ EXPORT_SYMBOL_GPL(power_supply_class);
51381 ATOMIC_NOTIFIER_HEAD(power_supply_notifier);
51382 EXPORT_SYMBOL_GPL(power_supply_notifier);
51383
51384-static struct device_type power_supply_dev_type;
51385+extern const struct attribute_group *power_supply_attr_groups[];
51386+static struct device_type power_supply_dev_type = {
51387+ .groups = power_supply_attr_groups,
51388+};
51389
51390 static bool __power_supply_is_supplied_by(struct power_supply *supplier,
51391 struct power_supply *supply)
51392@@ -637,7 +640,7 @@ static int __init power_supply_class_init(void)
51393 return PTR_ERR(power_supply_class);
51394
51395 power_supply_class->dev_uevent = power_supply_uevent;
51396- power_supply_init_attrs(&power_supply_dev_type);
51397+ power_supply_init_attrs();
51398
51399 return 0;
51400 }
51401diff --git a/drivers/power/power_supply_sysfs.c b/drivers/power/power_supply_sysfs.c
51402index 62653f5..d0bb485 100644
51403--- a/drivers/power/power_supply_sysfs.c
51404+++ b/drivers/power/power_supply_sysfs.c
51405@@ -238,17 +238,15 @@ static struct attribute_group power_supply_attr_group = {
51406 .is_visible = power_supply_attr_is_visible,
51407 };
51408
51409-static const struct attribute_group *power_supply_attr_groups[] = {
51410+const struct attribute_group *power_supply_attr_groups[] = {
51411 &power_supply_attr_group,
51412 NULL,
51413 };
51414
51415-void power_supply_init_attrs(struct device_type *dev_type)
51416+void power_supply_init_attrs(void)
51417 {
51418 int i;
51419
51420- dev_type->groups = power_supply_attr_groups;
51421-
51422 for (i = 0; i < ARRAY_SIZE(power_supply_attrs); i++)
51423 __power_supply_attrs[i] = &power_supply_attrs[i].attr;
51424 }
51425diff --git a/drivers/powercap/powercap_sys.c b/drivers/powercap/powercap_sys.c
51426index 84419af..268ede8 100644
51427--- a/drivers/powercap/powercap_sys.c
51428+++ b/drivers/powercap/powercap_sys.c
51429@@ -154,8 +154,77 @@ struct powercap_constraint_attr {
51430 struct device_attribute name_attr;
51431 };
51432
51433+static ssize_t show_constraint_name(struct device *dev,
51434+ struct device_attribute *dev_attr,
51435+ char *buf);
51436+
51437 static struct powercap_constraint_attr
51438- constraint_attrs[MAX_CONSTRAINTS_PER_ZONE];
51439+ constraint_attrs[MAX_CONSTRAINTS_PER_ZONE] = {
51440+ [0 ... MAX_CONSTRAINTS_PER_ZONE - 1] = {
51441+ .power_limit_attr = {
51442+ .attr = {
51443+ .name = NULL,
51444+ .mode = S_IWUSR | S_IRUGO
51445+ },
51446+ .show = show_constraint_power_limit_uw,
51447+ .store = store_constraint_power_limit_uw
51448+ },
51449+
51450+ .time_window_attr = {
51451+ .attr = {
51452+ .name = NULL,
51453+ .mode = S_IWUSR | S_IRUGO
51454+ },
51455+ .show = show_constraint_time_window_us,
51456+ .store = store_constraint_time_window_us
51457+ },
51458+
51459+ .max_power_attr = {
51460+ .attr = {
51461+ .name = NULL,
51462+ .mode = S_IRUGO
51463+ },
51464+ .show = show_constraint_max_power_uw,
51465+ .store = NULL
51466+ },
51467+
51468+ .min_power_attr = {
51469+ .attr = {
51470+ .name = NULL,
51471+ .mode = S_IRUGO
51472+ },
51473+ .show = show_constraint_min_power_uw,
51474+ .store = NULL
51475+ },
51476+
51477+ .max_time_window_attr = {
51478+ .attr = {
51479+ .name = NULL,
51480+ .mode = S_IRUGO
51481+ },
51482+ .show = show_constraint_max_time_window_us,
51483+ .store = NULL
51484+ },
51485+
51486+ .min_time_window_attr = {
51487+ .attr = {
51488+ .name = NULL,
51489+ .mode = S_IRUGO
51490+ },
51491+ .show = show_constraint_min_time_window_us,
51492+ .store = NULL
51493+ },
51494+
51495+ .name_attr = {
51496+ .attr = {
51497+ .name = NULL,
51498+ .mode = S_IRUGO
51499+ },
51500+ .show = show_constraint_name,
51501+ .store = NULL
51502+ }
51503+ }
51504+};
51505
51506 /* A list of powercap control_types */
51507 static LIST_HEAD(powercap_cntrl_list);
51508@@ -193,23 +262,16 @@ static ssize_t show_constraint_name(struct device *dev,
51509 }
51510
51511 static int create_constraint_attribute(int id, const char *name,
51512- int mode,
51513- struct device_attribute *dev_attr,
51514- ssize_t (*show)(struct device *,
51515- struct device_attribute *, char *),
51516- ssize_t (*store)(struct device *,
51517- struct device_attribute *,
51518- const char *, size_t)
51519- )
51520+ struct device_attribute *dev_attr)
51521 {
51522+ name = kasprintf(GFP_KERNEL, "constraint_%d_%s", id, name);
51523
51524- dev_attr->attr.name = kasprintf(GFP_KERNEL, "constraint_%d_%s",
51525- id, name);
51526- if (!dev_attr->attr.name)
51527+ if (!name)
51528 return -ENOMEM;
51529- dev_attr->attr.mode = mode;
51530- dev_attr->show = show;
51531- dev_attr->store = store;
51532+
51533+ pax_open_kernel();
51534+ *(const char **)&dev_attr->attr.name = name;
51535+ pax_close_kernel();
51536
51537 return 0;
51538 }
51539@@ -236,49 +298,31 @@ static int seed_constraint_attributes(void)
51540
51541 for (i = 0; i < MAX_CONSTRAINTS_PER_ZONE; ++i) {
51542 ret = create_constraint_attribute(i, "power_limit_uw",
51543- S_IWUSR | S_IRUGO,
51544- &constraint_attrs[i].power_limit_attr,
51545- show_constraint_power_limit_uw,
51546- store_constraint_power_limit_uw);
51547+ &constraint_attrs[i].power_limit_attr);
51548 if (ret)
51549 goto err_alloc;
51550 ret = create_constraint_attribute(i, "time_window_us",
51551- S_IWUSR | S_IRUGO,
51552- &constraint_attrs[i].time_window_attr,
51553- show_constraint_time_window_us,
51554- store_constraint_time_window_us);
51555+ &constraint_attrs[i].time_window_attr);
51556 if (ret)
51557 goto err_alloc;
51558- ret = create_constraint_attribute(i, "name", S_IRUGO,
51559- &constraint_attrs[i].name_attr,
51560- show_constraint_name,
51561- NULL);
51562+ ret = create_constraint_attribute(i, "name",
51563+ &constraint_attrs[i].name_attr);
51564 if (ret)
51565 goto err_alloc;
51566- ret = create_constraint_attribute(i, "max_power_uw", S_IRUGO,
51567- &constraint_attrs[i].max_power_attr,
51568- show_constraint_max_power_uw,
51569- NULL);
51570+ ret = create_constraint_attribute(i, "max_power_uw",
51571+ &constraint_attrs[i].max_power_attr);
51572 if (ret)
51573 goto err_alloc;
51574- ret = create_constraint_attribute(i, "min_power_uw", S_IRUGO,
51575- &constraint_attrs[i].min_power_attr,
51576- show_constraint_min_power_uw,
51577- NULL);
51578+ ret = create_constraint_attribute(i, "min_power_uw",
51579+ &constraint_attrs[i].min_power_attr);
51580 if (ret)
51581 goto err_alloc;
51582 ret = create_constraint_attribute(i, "max_time_window_us",
51583- S_IRUGO,
51584- &constraint_attrs[i].max_time_window_attr,
51585- show_constraint_max_time_window_us,
51586- NULL);
51587+ &constraint_attrs[i].max_time_window_attr);
51588 if (ret)
51589 goto err_alloc;
51590 ret = create_constraint_attribute(i, "min_time_window_us",
51591- S_IRUGO,
51592- &constraint_attrs[i].min_time_window_attr,
51593- show_constraint_min_time_window_us,
51594- NULL);
51595+ &constraint_attrs[i].min_time_window_attr);
51596 if (ret)
51597 goto err_alloc;
51598
51599@@ -378,10 +422,12 @@ static void create_power_zone_common_attributes(
51600 power_zone->zone_dev_attrs[count++] =
51601 &dev_attr_max_energy_range_uj.attr;
51602 if (power_zone->ops->get_energy_uj) {
51603+ pax_open_kernel();
51604 if (power_zone->ops->reset_energy_uj)
51605- dev_attr_energy_uj.attr.mode = S_IWUSR | S_IRUGO;
51606+ *(umode_t *)&dev_attr_energy_uj.attr.mode = S_IWUSR | S_IRUGO;
51607 else
51608- dev_attr_energy_uj.attr.mode = S_IRUGO;
51609+ *(umode_t *)&dev_attr_energy_uj.attr.mode = S_IRUGO;
51610+ pax_close_kernel();
51611 power_zone->zone_dev_attrs[count++] =
51612 &dev_attr_energy_uj.attr;
51613 }
51614diff --git a/drivers/ptp/ptp_private.h b/drivers/ptp/ptp_private.h
51615index 9c5d414..c7900ce 100644
51616--- a/drivers/ptp/ptp_private.h
51617+++ b/drivers/ptp/ptp_private.h
51618@@ -51,7 +51,7 @@ struct ptp_clock {
51619 struct mutex pincfg_mux; /* protect concurrent info->pin_config access */
51620 wait_queue_head_t tsev_wq;
51621 int defunct; /* tells readers to go away when clock is being removed */
51622- struct device_attribute *pin_dev_attr;
51623+ device_attribute_no_const *pin_dev_attr;
51624 struct attribute **pin_attr;
51625 struct attribute_group pin_attr_group;
51626 };
51627diff --git a/drivers/ptp/ptp_sysfs.c b/drivers/ptp/ptp_sysfs.c
51628index 302e626..12579af 100644
51629--- a/drivers/ptp/ptp_sysfs.c
51630+++ b/drivers/ptp/ptp_sysfs.c
51631@@ -280,7 +280,7 @@ static int ptp_populate_pins(struct ptp_clock *ptp)
51632 goto no_pin_attr;
51633
51634 for (i = 0; i < n_pins; i++) {
51635- struct device_attribute *da = &ptp->pin_dev_attr[i];
51636+ device_attribute_no_const *da = &ptp->pin_dev_attr[i];
51637 sysfs_attr_init(&da->attr);
51638 da->attr.name = info->pin_config[i].name;
51639 da->attr.mode = 0644;
51640diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
51641index a4a8a6d..a3456f4 100644
51642--- a/drivers/regulator/core.c
51643+++ b/drivers/regulator/core.c
51644@@ -3529,7 +3529,7 @@ regulator_register(const struct regulator_desc *regulator_desc,
51645 const struct regulation_constraints *constraints = NULL;
51646 const struct regulator_init_data *init_data;
51647 struct regulator_config *config = NULL;
51648- static atomic_t regulator_no = ATOMIC_INIT(-1);
51649+ static atomic_unchecked_t regulator_no = ATOMIC_INIT(-1);
51650 struct regulator_dev *rdev;
51651 struct device *dev;
51652 int ret, i;
51653@@ -3613,7 +3613,7 @@ regulator_register(const struct regulator_desc *regulator_desc,
51654 rdev->dev.class = &regulator_class;
51655 rdev->dev.parent = dev;
51656 dev_set_name(&rdev->dev, "regulator.%lu",
51657- (unsigned long) atomic_inc_return(&regulator_no));
51658+ (unsigned long) atomic_inc_return_unchecked(&regulator_no));
51659 ret = device_register(&rdev->dev);
51660 if (ret != 0) {
51661 put_device(&rdev->dev);
51662diff --git a/drivers/regulator/max8660.c b/drivers/regulator/max8660.c
51663index 7eee2ca..4024513 100644
51664--- a/drivers/regulator/max8660.c
51665+++ b/drivers/regulator/max8660.c
51666@@ -424,8 +424,10 @@ static int max8660_probe(struct i2c_client *client,
51667 max8660->shadow_regs[MAX8660_OVER1] = 5;
51668 } else {
51669 /* Otherwise devices can be toggled via software */
51670- max8660_dcdc_ops.enable = max8660_dcdc_enable;
51671- max8660_dcdc_ops.disable = max8660_dcdc_disable;
51672+ pax_open_kernel();
51673+ *(void **)&max8660_dcdc_ops.enable = max8660_dcdc_enable;
51674+ *(void **)&max8660_dcdc_ops.disable = max8660_dcdc_disable;
51675+ pax_close_kernel();
51676 }
51677
51678 /*
51679diff --git a/drivers/regulator/max8973-regulator.c b/drivers/regulator/max8973-regulator.c
51680index c3d55c2..0dddfe6 100644
51681--- a/drivers/regulator/max8973-regulator.c
51682+++ b/drivers/regulator/max8973-regulator.c
51683@@ -403,9 +403,11 @@ static int max8973_probe(struct i2c_client *client,
51684 if (!pdata || !pdata->enable_ext_control) {
51685 max->desc.enable_reg = MAX8973_VOUT;
51686 max->desc.enable_mask = MAX8973_VOUT_ENABLE;
51687- max->ops.enable = regulator_enable_regmap;
51688- max->ops.disable = regulator_disable_regmap;
51689- max->ops.is_enabled = regulator_is_enabled_regmap;
51690+ pax_open_kernel();
51691+ *(void **)&max->ops.enable = regulator_enable_regmap;
51692+ *(void **)&max->ops.disable = regulator_disable_regmap;
51693+ *(void **)&max->ops.is_enabled = regulator_is_enabled_regmap;
51694+ pax_close_kernel();
51695 }
51696
51697 if (pdata) {
51698diff --git a/drivers/regulator/mc13892-regulator.c b/drivers/regulator/mc13892-regulator.c
51699index 0d17c92..a29f627 100644
51700--- a/drivers/regulator/mc13892-regulator.c
51701+++ b/drivers/regulator/mc13892-regulator.c
51702@@ -584,10 +584,12 @@ static int mc13892_regulator_probe(struct platform_device *pdev)
51703 mc13xxx_unlock(mc13892);
51704
51705 /* update mc13892_vcam ops */
51706- memcpy(&mc13892_vcam_ops, mc13892_regulators[MC13892_VCAM].desc.ops,
51707+ pax_open_kernel();
51708+ memcpy((void *)&mc13892_vcam_ops, mc13892_regulators[MC13892_VCAM].desc.ops,
51709 sizeof(struct regulator_ops));
51710- mc13892_vcam_ops.set_mode = mc13892_vcam_set_mode,
51711- mc13892_vcam_ops.get_mode = mc13892_vcam_get_mode,
51712+ *(void **)&mc13892_vcam_ops.set_mode = mc13892_vcam_set_mode,
51713+ *(void **)&mc13892_vcam_ops.get_mode = mc13892_vcam_get_mode,
51714+ pax_close_kernel();
51715 mc13892_regulators[MC13892_VCAM].desc.ops = &mc13892_vcam_ops;
51716
51717 mc13xxx_data = mc13xxx_parse_regulators_dt(pdev, mc13892_regulators,
51718diff --git a/drivers/rtc/rtc-cmos.c b/drivers/rtc/rtc-cmos.c
51719index 5b2e761..c8c8a4a 100644
51720--- a/drivers/rtc/rtc-cmos.c
51721+++ b/drivers/rtc/rtc-cmos.c
51722@@ -789,7 +789,9 @@ cmos_do_probe(struct device *dev, struct resource *ports, int rtc_irq)
51723 hpet_rtc_timer_init();
51724
51725 /* export at least the first block of NVRAM */
51726- nvram.size = address_space - NVRAM_OFFSET;
51727+ pax_open_kernel();
51728+ *(size_t *)&nvram.size = address_space - NVRAM_OFFSET;
51729+ pax_close_kernel();
51730 retval = sysfs_create_bin_file(&dev->kobj, &nvram);
51731 if (retval < 0) {
51732 dev_dbg(dev, "can't create nvram file? %d\n", retval);
51733diff --git a/drivers/rtc/rtc-dev.c b/drivers/rtc/rtc-dev.c
51734index 799c34b..8e9786a 100644
51735--- a/drivers/rtc/rtc-dev.c
51736+++ b/drivers/rtc/rtc-dev.c
51737@@ -16,6 +16,7 @@
51738 #include <linux/module.h>
51739 #include <linux/rtc.h>
51740 #include <linux/sched.h>
51741+#include <linux/grsecurity.h>
51742 #include "rtc-core.h"
51743
51744 static dev_t rtc_devt;
51745@@ -347,6 +348,8 @@ static long rtc_dev_ioctl(struct file *file,
51746 if (copy_from_user(&tm, uarg, sizeof(tm)))
51747 return -EFAULT;
51748
51749+ gr_log_timechange();
51750+
51751 return rtc_set_time(rtc, &tm);
51752
51753 case RTC_PIE_ON:
51754diff --git a/drivers/rtc/rtc-ds1307.c b/drivers/rtc/rtc-ds1307.c
51755index 4ffabb3..1f87fca 100644
51756--- a/drivers/rtc/rtc-ds1307.c
51757+++ b/drivers/rtc/rtc-ds1307.c
51758@@ -107,7 +107,7 @@ struct ds1307 {
51759 u8 offset; /* register's offset */
51760 u8 regs[11];
51761 u16 nvram_offset;
51762- struct bin_attribute *nvram;
51763+ bin_attribute_no_const *nvram;
51764 enum ds_type type;
51765 unsigned long flags;
51766 #define HAS_NVRAM 0 /* bit 0 == sysfs file active */
51767diff --git a/drivers/rtc/rtc-m48t59.c b/drivers/rtc/rtc-m48t59.c
51768index 90abb5b..e0bf6dd 100644
51769--- a/drivers/rtc/rtc-m48t59.c
51770+++ b/drivers/rtc/rtc-m48t59.c
51771@@ -483,7 +483,9 @@ static int m48t59_rtc_probe(struct platform_device *pdev)
51772 if (IS_ERR(m48t59->rtc))
51773 return PTR_ERR(m48t59->rtc);
51774
51775- m48t59_nvram_attr.size = pdata->offset;
51776+ pax_open_kernel();
51777+ *(size_t *)&m48t59_nvram_attr.size = pdata->offset;
51778+ pax_close_kernel();
51779
51780 ret = sysfs_create_bin_file(&pdev->dev.kobj, &m48t59_nvram_attr);
51781 if (ret)
51782diff --git a/drivers/scsi/bfa/bfa_fcpim.h b/drivers/scsi/bfa/bfa_fcpim.h
51783index e693af6..2e525b6 100644
51784--- a/drivers/scsi/bfa/bfa_fcpim.h
51785+++ b/drivers/scsi/bfa/bfa_fcpim.h
51786@@ -36,7 +36,7 @@ struct bfa_iotag_s {
51787
51788 struct bfa_itn_s {
51789 bfa_isr_func_t isr;
51790-};
51791+} __no_const;
51792
51793 void bfa_itn_create(struct bfa_s *bfa, struct bfa_rport_s *rport,
51794 void (*isr)(struct bfa_s *bfa, struct bfi_msg_s *m));
51795diff --git a/drivers/scsi/bfa/bfa_fcs.c b/drivers/scsi/bfa/bfa_fcs.c
51796index 0f19455..ef7adb5 100644
51797--- a/drivers/scsi/bfa/bfa_fcs.c
51798+++ b/drivers/scsi/bfa/bfa_fcs.c
51799@@ -38,10 +38,21 @@ struct bfa_fcs_mod_s {
51800 #define BFA_FCS_MODULE(_mod) { _mod ## _modinit, _mod ## _modexit }
51801
51802 static struct bfa_fcs_mod_s fcs_modules[] = {
51803- { bfa_fcs_port_attach, NULL, NULL },
51804- { bfa_fcs_uf_attach, NULL, NULL },
51805- { bfa_fcs_fabric_attach, bfa_fcs_fabric_modinit,
51806- bfa_fcs_fabric_modexit },
51807+ {
51808+ .attach = bfa_fcs_port_attach,
51809+ .modinit = NULL,
51810+ .modexit = NULL
51811+ },
51812+ {
51813+ .attach = bfa_fcs_uf_attach,
51814+ .modinit = NULL,
51815+ .modexit = NULL
51816+ },
51817+ {
51818+ .attach = bfa_fcs_fabric_attach,
51819+ .modinit = bfa_fcs_fabric_modinit,
51820+ .modexit = bfa_fcs_fabric_modexit
51821+ },
51822 };
51823
51824 /*
51825diff --git a/drivers/scsi/bfa/bfa_fcs_lport.c b/drivers/scsi/bfa/bfa_fcs_lport.c
51826index ff75ef8..2dfe00a 100644
51827--- a/drivers/scsi/bfa/bfa_fcs_lport.c
51828+++ b/drivers/scsi/bfa/bfa_fcs_lport.c
51829@@ -89,15 +89,26 @@ static struct {
51830 void (*offline) (struct bfa_fcs_lport_s *port);
51831 } __port_action[] = {
51832 {
51833- bfa_fcs_lport_unknown_init, bfa_fcs_lport_unknown_online,
51834- bfa_fcs_lport_unknown_offline}, {
51835- bfa_fcs_lport_fab_init, bfa_fcs_lport_fab_online,
51836- bfa_fcs_lport_fab_offline}, {
51837- bfa_fcs_lport_n2n_init, bfa_fcs_lport_n2n_online,
51838- bfa_fcs_lport_n2n_offline}, {
51839- bfa_fcs_lport_loop_init, bfa_fcs_lport_loop_online,
51840- bfa_fcs_lport_loop_offline},
51841- };
51842+ .init = bfa_fcs_lport_unknown_init,
51843+ .online = bfa_fcs_lport_unknown_online,
51844+ .offline = bfa_fcs_lport_unknown_offline
51845+ },
51846+ {
51847+ .init = bfa_fcs_lport_fab_init,
51848+ .online = bfa_fcs_lport_fab_online,
51849+ .offline = bfa_fcs_lport_fab_offline
51850+ },
51851+ {
51852+ .init = bfa_fcs_lport_n2n_init,
51853+ .online = bfa_fcs_lport_n2n_online,
51854+ .offline = bfa_fcs_lport_n2n_offline
51855+ },
51856+ {
51857+ .init = bfa_fcs_lport_loop_init,
51858+ .online = bfa_fcs_lport_loop_online,
51859+ .offline = bfa_fcs_lport_loop_offline
51860+ },
51861+};
51862
51863 /*
51864 * fcs_port_sm FCS logical port state machine
51865diff --git a/drivers/scsi/bfa/bfa_ioc.h b/drivers/scsi/bfa/bfa_ioc.h
51866index a38aafa0..fe8f03b 100644
51867--- a/drivers/scsi/bfa/bfa_ioc.h
51868+++ b/drivers/scsi/bfa/bfa_ioc.h
51869@@ -258,7 +258,7 @@ struct bfa_ioc_cbfn_s {
51870 bfa_ioc_disable_cbfn_t disable_cbfn;
51871 bfa_ioc_hbfail_cbfn_t hbfail_cbfn;
51872 bfa_ioc_reset_cbfn_t reset_cbfn;
51873-};
51874+} __no_const;
51875
51876 /*
51877 * IOC event notification mechanism.
51878@@ -352,7 +352,7 @@ struct bfa_ioc_hwif_s {
51879 void (*ioc_set_alt_fwstate) (struct bfa_ioc_s *ioc,
51880 enum bfi_ioc_state fwstate);
51881 enum bfi_ioc_state (*ioc_get_alt_fwstate) (struct bfa_ioc_s *ioc);
51882-};
51883+} __no_const;
51884
51885 /*
51886 * Queue element to wait for room in request queue. FIFO order is
51887diff --git a/drivers/scsi/bfa/bfa_modules.h b/drivers/scsi/bfa/bfa_modules.h
51888index a14c784..6de6790 100644
51889--- a/drivers/scsi/bfa/bfa_modules.h
51890+++ b/drivers/scsi/bfa/bfa_modules.h
51891@@ -78,12 +78,12 @@ enum {
51892 \
51893 extern struct bfa_module_s hal_mod_ ## __mod; \
51894 struct bfa_module_s hal_mod_ ## __mod = { \
51895- bfa_ ## __mod ## _meminfo, \
51896- bfa_ ## __mod ## _attach, \
51897- bfa_ ## __mod ## _detach, \
51898- bfa_ ## __mod ## _start, \
51899- bfa_ ## __mod ## _stop, \
51900- bfa_ ## __mod ## _iocdisable, \
51901+ .meminfo = bfa_ ## __mod ## _meminfo, \
51902+ .attach = bfa_ ## __mod ## _attach, \
51903+ .detach = bfa_ ## __mod ## _detach, \
51904+ .start = bfa_ ## __mod ## _start, \
51905+ .stop = bfa_ ## __mod ## _stop, \
51906+ .iocdisable = bfa_ ## __mod ## _iocdisable, \
51907 }
51908
51909 #define BFA_CACHELINE_SZ (256)
51910diff --git a/drivers/scsi/fcoe/fcoe_sysfs.c b/drivers/scsi/fcoe/fcoe_sysfs.c
51911index 045c4e1..13de803 100644
51912--- a/drivers/scsi/fcoe/fcoe_sysfs.c
51913+++ b/drivers/scsi/fcoe/fcoe_sysfs.c
51914@@ -33,8 +33,8 @@
51915 */
51916 #include "libfcoe.h"
51917
51918-static atomic_t ctlr_num;
51919-static atomic_t fcf_num;
51920+static atomic_unchecked_t ctlr_num;
51921+static atomic_unchecked_t fcf_num;
51922
51923 /*
51924 * fcoe_fcf_dev_loss_tmo: the default number of seconds that fcoe sysfs
51925@@ -685,7 +685,7 @@ struct fcoe_ctlr_device *fcoe_ctlr_device_add(struct device *parent,
51926 if (!ctlr)
51927 goto out;
51928
51929- ctlr->id = atomic_inc_return(&ctlr_num) - 1;
51930+ ctlr->id = atomic_inc_return_unchecked(&ctlr_num) - 1;
51931 ctlr->f = f;
51932 ctlr->mode = FIP_CONN_TYPE_FABRIC;
51933 INIT_LIST_HEAD(&ctlr->fcfs);
51934@@ -902,7 +902,7 @@ struct fcoe_fcf_device *fcoe_fcf_device_add(struct fcoe_ctlr_device *ctlr,
51935 fcf->dev.parent = &ctlr->dev;
51936 fcf->dev.bus = &fcoe_bus_type;
51937 fcf->dev.type = &fcoe_fcf_device_type;
51938- fcf->id = atomic_inc_return(&fcf_num) - 1;
51939+ fcf->id = atomic_inc_return_unchecked(&fcf_num) - 1;
51940 fcf->state = FCOE_FCF_STATE_UNKNOWN;
51941
51942 fcf->dev_loss_tmo = ctlr->fcf_dev_loss_tmo;
51943@@ -938,8 +938,8 @@ int __init fcoe_sysfs_setup(void)
51944 {
51945 int error;
51946
51947- atomic_set(&ctlr_num, 0);
51948- atomic_set(&fcf_num, 0);
51949+ atomic_set_unchecked(&ctlr_num, 0);
51950+ atomic_set_unchecked(&fcf_num, 0);
51951
51952 error = bus_register(&fcoe_bus_type);
51953 if (error)
51954diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c
51955index 8bb173e..20236b4 100644
51956--- a/drivers/scsi/hosts.c
51957+++ b/drivers/scsi/hosts.c
51958@@ -42,7 +42,7 @@
51959 #include "scsi_logging.h"
51960
51961
51962-static atomic_t scsi_host_next_hn = ATOMIC_INIT(0); /* host_no for next new host */
51963+static atomic_unchecked_t scsi_host_next_hn = ATOMIC_INIT(0); /* host_no for next new host */
51964
51965
51966 static void scsi_host_cls_release(struct device *dev)
51967@@ -392,7 +392,7 @@ struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize)
51968 * subtract one because we increment first then return, but we need to
51969 * know what the next host number was before increment
51970 */
51971- shost->host_no = atomic_inc_return(&scsi_host_next_hn) - 1;
51972+ shost->host_no = atomic_inc_return_unchecked(&scsi_host_next_hn) - 1;
51973 shost->dma_channel = 0xff;
51974
51975 /* These three are default values which can be overridden */
51976diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
51977index a1cfbd3..d7f8ebc 100644
51978--- a/drivers/scsi/hpsa.c
51979+++ b/drivers/scsi/hpsa.c
51980@@ -697,10 +697,10 @@ static inline u32 next_command(struct ctlr_info *h, u8 q)
51981 struct reply_queue_buffer *rq = &h->reply_queue[q];
51982
51983 if (h->transMethod & CFGTBL_Trans_io_accel1)
51984- return h->access.command_completed(h, q);
51985+ return h->access->command_completed(h, q);
51986
51987 if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
51988- return h->access.command_completed(h, q);
51989+ return h->access->command_completed(h, q);
51990
51991 if ((rq->head[rq->current_entry] & 1) == rq->wraparound) {
51992 a = rq->head[rq->current_entry];
51993@@ -837,7 +837,7 @@ static void enqueue_cmd_and_start_io(struct ctlr_info *h,
51994 break;
51995 default:
51996 set_performant_mode(h, c);
51997- h->access.submit_command(h, c);
51998+ h->access->submit_command(h, c);
51999 }
52000 }
52001
52002@@ -5369,17 +5369,17 @@ static void __iomem *remap_pci_mem(ulong base, ulong size)
52003
52004 static inline unsigned long get_next_completion(struct ctlr_info *h, u8 q)
52005 {
52006- return h->access.command_completed(h, q);
52007+ return h->access->command_completed(h, q);
52008 }
52009
52010 static inline bool interrupt_pending(struct ctlr_info *h)
52011 {
52012- return h->access.intr_pending(h);
52013+ return h->access->intr_pending(h);
52014 }
52015
52016 static inline long interrupt_not_for_us(struct ctlr_info *h)
52017 {
52018- return (h->access.intr_pending(h) == 0) ||
52019+ return (h->access->intr_pending(h) == 0) ||
52020 (h->interrupts_enabled == 0);
52021 }
52022
52023@@ -6270,7 +6270,7 @@ static int hpsa_pci_init(struct ctlr_info *h)
52024 if (prod_index < 0)
52025 return prod_index;
52026 h->product_name = products[prod_index].product_name;
52027- h->access = *(products[prod_index].access);
52028+ h->access = products[prod_index].access;
52029
52030 pci_disable_link_state(h->pdev, PCIE_LINK_STATE_L0S |
52031 PCIE_LINK_STATE_L1 | PCIE_LINK_STATE_CLKPM);
52032@@ -6649,7 +6649,7 @@ static void controller_lockup_detected(struct ctlr_info *h)
52033 unsigned long flags;
52034 u32 lockup_detected;
52035
52036- h->access.set_intr_mask(h, HPSA_INTR_OFF);
52037+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
52038 spin_lock_irqsave(&h->lock, flags);
52039 lockup_detected = readl(h->vaddr + SA5_SCRATCHPAD_OFFSET);
52040 if (!lockup_detected) {
52041@@ -6924,7 +6924,7 @@ reinit_after_soft_reset:
52042 }
52043
52044 /* make sure the board interrupts are off */
52045- h->access.set_intr_mask(h, HPSA_INTR_OFF);
52046+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
52047
52048 if (hpsa_request_irqs(h, do_hpsa_intr_msi, do_hpsa_intr_intx))
52049 goto clean2;
52050@@ -6960,7 +6960,7 @@ reinit_after_soft_reset:
52051 * fake ones to scoop up any residual completions.
52052 */
52053 spin_lock_irqsave(&h->lock, flags);
52054- h->access.set_intr_mask(h, HPSA_INTR_OFF);
52055+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
52056 spin_unlock_irqrestore(&h->lock, flags);
52057 hpsa_free_irqs(h);
52058 rc = hpsa_request_irqs(h, hpsa_msix_discard_completions,
52059@@ -6979,9 +6979,9 @@ reinit_after_soft_reset:
52060 dev_info(&h->pdev->dev, "Board READY.\n");
52061 dev_info(&h->pdev->dev,
52062 "Waiting for stale completions to drain.\n");
52063- h->access.set_intr_mask(h, HPSA_INTR_ON);
52064+ h->access->set_intr_mask(h, HPSA_INTR_ON);
52065 msleep(10000);
52066- h->access.set_intr_mask(h, HPSA_INTR_OFF);
52067+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
52068
52069 rc = controller_reset_failed(h->cfgtable);
52070 if (rc)
52071@@ -7006,7 +7006,7 @@ reinit_after_soft_reset:
52072
52073
52074 /* Turn the interrupts on so we can service requests */
52075- h->access.set_intr_mask(h, HPSA_INTR_ON);
52076+ h->access->set_intr_mask(h, HPSA_INTR_ON);
52077
52078 hpsa_hba_inquiry(h);
52079 hpsa_register_scsi(h); /* hook ourselves into SCSI subsystem */
52080@@ -7079,7 +7079,7 @@ static void hpsa_shutdown(struct pci_dev *pdev)
52081 * To write all data in the battery backed cache to disks
52082 */
52083 hpsa_flush_cache(h);
52084- h->access.set_intr_mask(h, HPSA_INTR_OFF);
52085+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
52086 hpsa_free_irqs_and_disable_msix(h);
52087 }
52088
52089@@ -7200,7 +7200,7 @@ static int hpsa_enter_performant_mode(struct ctlr_info *h, u32 trans_support)
52090 CFGTBL_Trans_enable_directed_msix |
52091 (trans_support & (CFGTBL_Trans_io_accel1 |
52092 CFGTBL_Trans_io_accel2));
52093- struct access_method access = SA5_performant_access;
52094+ struct access_method *access = &SA5_performant_access;
52095
52096 /* This is a bit complicated. There are 8 registers on
52097 * the controller which we write to to tell it 8 different
52098@@ -7242,7 +7242,7 @@ static int hpsa_enter_performant_mode(struct ctlr_info *h, u32 trans_support)
52099 * perform the superfluous readl() after each command submission.
52100 */
52101 if (trans_support & (CFGTBL_Trans_io_accel1 | CFGTBL_Trans_io_accel2))
52102- access = SA5_performant_access_no_read;
52103+ access = &SA5_performant_access_no_read;
52104
52105 /* Controller spec: zero out this buffer. */
52106 for (i = 0; i < h->nreply_queues; i++)
52107@@ -7272,12 +7272,12 @@ static int hpsa_enter_performant_mode(struct ctlr_info *h, u32 trans_support)
52108 * enable outbound interrupt coalescing in accelerator mode;
52109 */
52110 if (trans_support & CFGTBL_Trans_io_accel1) {
52111- access = SA5_ioaccel_mode1_access;
52112+ access = &SA5_ioaccel_mode1_access;
52113 writel(10, &h->cfgtable->HostWrite.CoalIntDelay);
52114 writel(4, &h->cfgtable->HostWrite.CoalIntCount);
52115 } else {
52116 if (trans_support & CFGTBL_Trans_io_accel2) {
52117- access = SA5_ioaccel_mode2_access;
52118+ access = &SA5_ioaccel_mode2_access;
52119 writel(10, &h->cfgtable->HostWrite.CoalIntDelay);
52120 writel(4, &h->cfgtable->HostWrite.CoalIntCount);
52121 }
52122diff --git a/drivers/scsi/hpsa.h b/drivers/scsi/hpsa.h
52123index 6577130..955f9a4 100644
52124--- a/drivers/scsi/hpsa.h
52125+++ b/drivers/scsi/hpsa.h
52126@@ -143,7 +143,7 @@ struct ctlr_info {
52127 unsigned int msix_vector;
52128 unsigned int msi_vector;
52129 int intr_mode; /* either PERF_MODE_INT or SIMPLE_MODE_INT */
52130- struct access_method access;
52131+ struct access_method *access;
52132 char hba_mode_enabled;
52133
52134 /* queue and queue Info */
52135@@ -525,38 +525,38 @@ static unsigned long SA5_ioaccel_mode1_completed(struct ctlr_info *h, u8 q)
52136 }
52137
52138 static struct access_method SA5_access = {
52139- SA5_submit_command,
52140- SA5_intr_mask,
52141- SA5_intr_pending,
52142- SA5_completed,
52143+ .submit_command = SA5_submit_command,
52144+ .set_intr_mask = SA5_intr_mask,
52145+ .intr_pending = SA5_intr_pending,
52146+ .command_completed = SA5_completed,
52147 };
52148
52149 static struct access_method SA5_ioaccel_mode1_access = {
52150- SA5_submit_command,
52151- SA5_performant_intr_mask,
52152- SA5_ioaccel_mode1_intr_pending,
52153- SA5_ioaccel_mode1_completed,
52154+ .submit_command = SA5_submit_command,
52155+ .set_intr_mask = SA5_performant_intr_mask,
52156+ .intr_pending = SA5_ioaccel_mode1_intr_pending,
52157+ .command_completed = SA5_ioaccel_mode1_completed,
52158 };
52159
52160 static struct access_method SA5_ioaccel_mode2_access = {
52161- SA5_submit_command_ioaccel2,
52162- SA5_performant_intr_mask,
52163- SA5_performant_intr_pending,
52164- SA5_performant_completed,
52165+ .submit_command = SA5_submit_command_ioaccel2,
52166+ .set_intr_mask = SA5_performant_intr_mask,
52167+ .intr_pending = SA5_performant_intr_pending,
52168+ .command_completed = SA5_performant_completed,
52169 };
52170
52171 static struct access_method SA5_performant_access = {
52172- SA5_submit_command,
52173- SA5_performant_intr_mask,
52174- SA5_performant_intr_pending,
52175- SA5_performant_completed,
52176+ .submit_command = SA5_submit_command,
52177+ .set_intr_mask = SA5_performant_intr_mask,
52178+ .intr_pending = SA5_performant_intr_pending,
52179+ .command_completed = SA5_performant_completed,
52180 };
52181
52182 static struct access_method SA5_performant_access_no_read = {
52183- SA5_submit_command_no_read,
52184- SA5_performant_intr_mask,
52185- SA5_performant_intr_pending,
52186- SA5_performant_completed,
52187+ .submit_command = SA5_submit_command_no_read,
52188+ .set_intr_mask = SA5_performant_intr_mask,
52189+ .intr_pending = SA5_performant_intr_pending,
52190+ .command_completed = SA5_performant_completed,
52191 };
52192
52193 struct board_type {
52194diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c
52195index 1b3a094..068e683 100644
52196--- a/drivers/scsi/libfc/fc_exch.c
52197+++ b/drivers/scsi/libfc/fc_exch.c
52198@@ -101,12 +101,12 @@ struct fc_exch_mgr {
52199 u16 pool_max_index;
52200
52201 struct {
52202- atomic_t no_free_exch;
52203- atomic_t no_free_exch_xid;
52204- atomic_t xid_not_found;
52205- atomic_t xid_busy;
52206- atomic_t seq_not_found;
52207- atomic_t non_bls_resp;
52208+ atomic_unchecked_t no_free_exch;
52209+ atomic_unchecked_t no_free_exch_xid;
52210+ atomic_unchecked_t xid_not_found;
52211+ atomic_unchecked_t xid_busy;
52212+ atomic_unchecked_t seq_not_found;
52213+ atomic_unchecked_t non_bls_resp;
52214 } stats;
52215 };
52216
52217@@ -811,7 +811,7 @@ static struct fc_exch *fc_exch_em_alloc(struct fc_lport *lport,
52218 /* allocate memory for exchange */
52219 ep = mempool_alloc(mp->ep_pool, GFP_ATOMIC);
52220 if (!ep) {
52221- atomic_inc(&mp->stats.no_free_exch);
52222+ atomic_inc_unchecked(&mp->stats.no_free_exch);
52223 goto out;
52224 }
52225 memset(ep, 0, sizeof(*ep));
52226@@ -874,7 +874,7 @@ out:
52227 return ep;
52228 err:
52229 spin_unlock_bh(&pool->lock);
52230- atomic_inc(&mp->stats.no_free_exch_xid);
52231+ atomic_inc_unchecked(&mp->stats.no_free_exch_xid);
52232 mempool_free(ep, mp->ep_pool);
52233 return NULL;
52234 }
52235@@ -1023,7 +1023,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
52236 xid = ntohs(fh->fh_ox_id); /* we originated exch */
52237 ep = fc_exch_find(mp, xid);
52238 if (!ep) {
52239- atomic_inc(&mp->stats.xid_not_found);
52240+ atomic_inc_unchecked(&mp->stats.xid_not_found);
52241 reject = FC_RJT_OX_ID;
52242 goto out;
52243 }
52244@@ -1053,7 +1053,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
52245 ep = fc_exch_find(mp, xid);
52246 if ((f_ctl & FC_FC_FIRST_SEQ) && fc_sof_is_init(fr_sof(fp))) {
52247 if (ep) {
52248- atomic_inc(&mp->stats.xid_busy);
52249+ atomic_inc_unchecked(&mp->stats.xid_busy);
52250 reject = FC_RJT_RX_ID;
52251 goto rel;
52252 }
52253@@ -1064,7 +1064,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
52254 }
52255 xid = ep->xid; /* get our XID */
52256 } else if (!ep) {
52257- atomic_inc(&mp->stats.xid_not_found);
52258+ atomic_inc_unchecked(&mp->stats.xid_not_found);
52259 reject = FC_RJT_RX_ID; /* XID not found */
52260 goto out;
52261 }
52262@@ -1082,7 +1082,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
52263 } else {
52264 sp = &ep->seq;
52265 if (sp->id != fh->fh_seq_id) {
52266- atomic_inc(&mp->stats.seq_not_found);
52267+ atomic_inc_unchecked(&mp->stats.seq_not_found);
52268 if (f_ctl & FC_FC_END_SEQ) {
52269 /*
52270 * Update sequence_id based on incoming last
52271@@ -1533,22 +1533,22 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
52272
52273 ep = fc_exch_find(mp, ntohs(fh->fh_ox_id));
52274 if (!ep) {
52275- atomic_inc(&mp->stats.xid_not_found);
52276+ atomic_inc_unchecked(&mp->stats.xid_not_found);
52277 goto out;
52278 }
52279 if (ep->esb_stat & ESB_ST_COMPLETE) {
52280- atomic_inc(&mp->stats.xid_not_found);
52281+ atomic_inc_unchecked(&mp->stats.xid_not_found);
52282 goto rel;
52283 }
52284 if (ep->rxid == FC_XID_UNKNOWN)
52285 ep->rxid = ntohs(fh->fh_rx_id);
52286 if (ep->sid != 0 && ep->sid != ntoh24(fh->fh_d_id)) {
52287- atomic_inc(&mp->stats.xid_not_found);
52288+ atomic_inc_unchecked(&mp->stats.xid_not_found);
52289 goto rel;
52290 }
52291 if (ep->did != ntoh24(fh->fh_s_id) &&
52292 ep->did != FC_FID_FLOGI) {
52293- atomic_inc(&mp->stats.xid_not_found);
52294+ atomic_inc_unchecked(&mp->stats.xid_not_found);
52295 goto rel;
52296 }
52297 sof = fr_sof(fp);
52298@@ -1557,7 +1557,7 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
52299 sp->ssb_stat |= SSB_ST_RESP;
52300 sp->id = fh->fh_seq_id;
52301 } else if (sp->id != fh->fh_seq_id) {
52302- atomic_inc(&mp->stats.seq_not_found);
52303+ atomic_inc_unchecked(&mp->stats.seq_not_found);
52304 goto rel;
52305 }
52306
52307@@ -1619,9 +1619,9 @@ static void fc_exch_recv_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
52308 sp = fc_seq_lookup_orig(mp, fp); /* doesn't hold sequence */
52309
52310 if (!sp)
52311- atomic_inc(&mp->stats.xid_not_found);
52312+ atomic_inc_unchecked(&mp->stats.xid_not_found);
52313 else
52314- atomic_inc(&mp->stats.non_bls_resp);
52315+ atomic_inc_unchecked(&mp->stats.non_bls_resp);
52316
52317 fc_frame_free(fp);
52318 }
52319@@ -2261,13 +2261,13 @@ void fc_exch_update_stats(struct fc_lport *lport)
52320
52321 list_for_each_entry(ema, &lport->ema_list, ema_list) {
52322 mp = ema->mp;
52323- st->fc_no_free_exch += atomic_read(&mp->stats.no_free_exch);
52324+ st->fc_no_free_exch += atomic_read_unchecked(&mp->stats.no_free_exch);
52325 st->fc_no_free_exch_xid +=
52326- atomic_read(&mp->stats.no_free_exch_xid);
52327- st->fc_xid_not_found += atomic_read(&mp->stats.xid_not_found);
52328- st->fc_xid_busy += atomic_read(&mp->stats.xid_busy);
52329- st->fc_seq_not_found += atomic_read(&mp->stats.seq_not_found);
52330- st->fc_non_bls_resp += atomic_read(&mp->stats.non_bls_resp);
52331+ atomic_read_unchecked(&mp->stats.no_free_exch_xid);
52332+ st->fc_xid_not_found += atomic_read_unchecked(&mp->stats.xid_not_found);
52333+ st->fc_xid_busy += atomic_read_unchecked(&mp->stats.xid_busy);
52334+ st->fc_seq_not_found += atomic_read_unchecked(&mp->stats.seq_not_found);
52335+ st->fc_non_bls_resp += atomic_read_unchecked(&mp->stats.non_bls_resp);
52336 }
52337 }
52338 EXPORT_SYMBOL(fc_exch_update_stats);
52339diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c
52340index 9c706d8..d3e3ed2 100644
52341--- a/drivers/scsi/libsas/sas_ata.c
52342+++ b/drivers/scsi/libsas/sas_ata.c
52343@@ -535,7 +535,7 @@ static struct ata_port_operations sas_sata_ops = {
52344 .postreset = ata_std_postreset,
52345 .error_handler = ata_std_error_handler,
52346 .post_internal_cmd = sas_ata_post_internal,
52347- .qc_defer = ata_std_qc_defer,
52348+ .qc_defer = ata_std_qc_defer,
52349 .qc_prep = ata_noop_qc_prep,
52350 .qc_issue = sas_ata_qc_issue,
52351 .qc_fill_rtf = sas_ata_qc_fill_rtf,
52352diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
52353index 434e903..5a4a79b 100644
52354--- a/drivers/scsi/lpfc/lpfc.h
52355+++ b/drivers/scsi/lpfc/lpfc.h
52356@@ -430,7 +430,7 @@ struct lpfc_vport {
52357 struct dentry *debug_nodelist;
52358 struct dentry *vport_debugfs_root;
52359 struct lpfc_debugfs_trc *disc_trc;
52360- atomic_t disc_trc_cnt;
52361+ atomic_unchecked_t disc_trc_cnt;
52362 #endif
52363 uint8_t stat_data_enabled;
52364 uint8_t stat_data_blocked;
52365@@ -880,8 +880,8 @@ struct lpfc_hba {
52366 struct timer_list fabric_block_timer;
52367 unsigned long bit_flags;
52368 #define FABRIC_COMANDS_BLOCKED 0
52369- atomic_t num_rsrc_err;
52370- atomic_t num_cmd_success;
52371+ atomic_unchecked_t num_rsrc_err;
52372+ atomic_unchecked_t num_cmd_success;
52373 unsigned long last_rsrc_error_time;
52374 unsigned long last_ramp_down_time;
52375 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
52376@@ -916,7 +916,7 @@ struct lpfc_hba {
52377
52378 struct dentry *debug_slow_ring_trc;
52379 struct lpfc_debugfs_trc *slow_ring_trc;
52380- atomic_t slow_ring_trc_cnt;
52381+ atomic_unchecked_t slow_ring_trc_cnt;
52382 /* iDiag debugfs sub-directory */
52383 struct dentry *idiag_root;
52384 struct dentry *idiag_pci_cfg;
52385diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
52386index 5633e7d..8272114 100644
52387--- a/drivers/scsi/lpfc/lpfc_debugfs.c
52388+++ b/drivers/scsi/lpfc/lpfc_debugfs.c
52389@@ -106,7 +106,7 @@ MODULE_PARM_DESC(lpfc_debugfs_mask_disc_trc,
52390
52391 #include <linux/debugfs.h>
52392
52393-static atomic_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
52394+static atomic_unchecked_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
52395 static unsigned long lpfc_debugfs_start_time = 0L;
52396
52397 /* iDiag */
52398@@ -147,7 +147,7 @@ lpfc_debugfs_disc_trc_data(struct lpfc_vport *vport, char *buf, int size)
52399 lpfc_debugfs_enable = 0;
52400
52401 len = 0;
52402- index = (atomic_read(&vport->disc_trc_cnt) + 1) &
52403+ index = (atomic_read_unchecked(&vport->disc_trc_cnt) + 1) &
52404 (lpfc_debugfs_max_disc_trc - 1);
52405 for (i = index; i < lpfc_debugfs_max_disc_trc; i++) {
52406 dtp = vport->disc_trc + i;
52407@@ -213,7 +213,7 @@ lpfc_debugfs_slow_ring_trc_data(struct lpfc_hba *phba, char *buf, int size)
52408 lpfc_debugfs_enable = 0;
52409
52410 len = 0;
52411- index = (atomic_read(&phba->slow_ring_trc_cnt) + 1) &
52412+ index = (atomic_read_unchecked(&phba->slow_ring_trc_cnt) + 1) &
52413 (lpfc_debugfs_max_slow_ring_trc - 1);
52414 for (i = index; i < lpfc_debugfs_max_slow_ring_trc; i++) {
52415 dtp = phba->slow_ring_trc + i;
52416@@ -646,14 +646,14 @@ lpfc_debugfs_disc_trc(struct lpfc_vport *vport, int mask, char *fmt,
52417 !vport || !vport->disc_trc)
52418 return;
52419
52420- index = atomic_inc_return(&vport->disc_trc_cnt) &
52421+ index = atomic_inc_return_unchecked(&vport->disc_trc_cnt) &
52422 (lpfc_debugfs_max_disc_trc - 1);
52423 dtp = vport->disc_trc + index;
52424 dtp->fmt = fmt;
52425 dtp->data1 = data1;
52426 dtp->data2 = data2;
52427 dtp->data3 = data3;
52428- dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
52429+ dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
52430 dtp->jif = jiffies;
52431 #endif
52432 return;
52433@@ -684,14 +684,14 @@ lpfc_debugfs_slow_ring_trc(struct lpfc_hba *phba, char *fmt,
52434 !phba || !phba->slow_ring_trc)
52435 return;
52436
52437- index = atomic_inc_return(&phba->slow_ring_trc_cnt) &
52438+ index = atomic_inc_return_unchecked(&phba->slow_ring_trc_cnt) &
52439 (lpfc_debugfs_max_slow_ring_trc - 1);
52440 dtp = phba->slow_ring_trc + index;
52441 dtp->fmt = fmt;
52442 dtp->data1 = data1;
52443 dtp->data2 = data2;
52444 dtp->data3 = data3;
52445- dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
52446+ dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
52447 dtp->jif = jiffies;
52448 #endif
52449 return;
52450@@ -4268,7 +4268,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
52451 "slow_ring buffer\n");
52452 goto debug_failed;
52453 }
52454- atomic_set(&phba->slow_ring_trc_cnt, 0);
52455+ atomic_set_unchecked(&phba->slow_ring_trc_cnt, 0);
52456 memset(phba->slow_ring_trc, 0,
52457 (sizeof(struct lpfc_debugfs_trc) *
52458 lpfc_debugfs_max_slow_ring_trc));
52459@@ -4314,7 +4314,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
52460 "buffer\n");
52461 goto debug_failed;
52462 }
52463- atomic_set(&vport->disc_trc_cnt, 0);
52464+ atomic_set_unchecked(&vport->disc_trc_cnt, 0);
52465
52466 snprintf(name, sizeof(name), "discovery_trace");
52467 vport->debug_disc_trc =
52468diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
52469index 0b2c53a..aec2b45 100644
52470--- a/drivers/scsi/lpfc/lpfc_init.c
52471+++ b/drivers/scsi/lpfc/lpfc_init.c
52472@@ -11290,8 +11290,10 @@ lpfc_init(void)
52473 "misc_register returned with status %d", error);
52474
52475 if (lpfc_enable_npiv) {
52476- lpfc_transport_functions.vport_create = lpfc_vport_create;
52477- lpfc_transport_functions.vport_delete = lpfc_vport_delete;
52478+ pax_open_kernel();
52479+ *(void **)&lpfc_transport_functions.vport_create = lpfc_vport_create;
52480+ *(void **)&lpfc_transport_functions.vport_delete = lpfc_vport_delete;
52481+ pax_close_kernel();
52482 }
52483 lpfc_transport_template =
52484 fc_attach_transport(&lpfc_transport_functions);
52485diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
52486index 4f9222e..f1850e3 100644
52487--- a/drivers/scsi/lpfc/lpfc_scsi.c
52488+++ b/drivers/scsi/lpfc/lpfc_scsi.c
52489@@ -261,7 +261,7 @@ lpfc_rampdown_queue_depth(struct lpfc_hba *phba)
52490 unsigned long expires;
52491
52492 spin_lock_irqsave(&phba->hbalock, flags);
52493- atomic_inc(&phba->num_rsrc_err);
52494+ atomic_inc_unchecked(&phba->num_rsrc_err);
52495 phba->last_rsrc_error_time = jiffies;
52496
52497 expires = phba->last_ramp_down_time + QUEUE_RAMP_DOWN_INTERVAL;
52498@@ -303,8 +303,8 @@ lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
52499 unsigned long num_rsrc_err, num_cmd_success;
52500 int i;
52501
52502- num_rsrc_err = atomic_read(&phba->num_rsrc_err);
52503- num_cmd_success = atomic_read(&phba->num_cmd_success);
52504+ num_rsrc_err = atomic_read_unchecked(&phba->num_rsrc_err);
52505+ num_cmd_success = atomic_read_unchecked(&phba->num_cmd_success);
52506
52507 /*
52508 * The error and success command counters are global per
52509@@ -331,8 +331,8 @@ lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
52510 }
52511 }
52512 lpfc_destroy_vport_work_array(phba, vports);
52513- atomic_set(&phba->num_rsrc_err, 0);
52514- atomic_set(&phba->num_cmd_success, 0);
52515+ atomic_set_unchecked(&phba->num_rsrc_err, 0);
52516+ atomic_set_unchecked(&phba->num_cmd_success, 0);
52517 }
52518
52519 /**
52520diff --git a/drivers/scsi/mpt2sas/mpt2sas_scsih.c b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
52521index 3f26147..ee8efd1 100644
52522--- a/drivers/scsi/mpt2sas/mpt2sas_scsih.c
52523+++ b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
52524@@ -1509,7 +1509,7 @@ _scsih_get_resync(struct device *dev)
52525 {
52526 struct scsi_device *sdev = to_scsi_device(dev);
52527 struct MPT2SAS_ADAPTER *ioc = shost_priv(sdev->host);
52528- static struct _raid_device *raid_device;
52529+ struct _raid_device *raid_device;
52530 unsigned long flags;
52531 Mpi2RaidVolPage0_t vol_pg0;
52532 Mpi2ConfigReply_t mpi_reply;
52533@@ -1561,7 +1561,7 @@ _scsih_get_state(struct device *dev)
52534 {
52535 struct scsi_device *sdev = to_scsi_device(dev);
52536 struct MPT2SAS_ADAPTER *ioc = shost_priv(sdev->host);
52537- static struct _raid_device *raid_device;
52538+ struct _raid_device *raid_device;
52539 unsigned long flags;
52540 Mpi2RaidVolPage0_t vol_pg0;
52541 Mpi2ConfigReply_t mpi_reply;
52542@@ -6641,7 +6641,7 @@ _scsih_sas_ir_operation_status_event(struct MPT2SAS_ADAPTER *ioc,
52543 Mpi2EventDataIrOperationStatus_t *event_data =
52544 (Mpi2EventDataIrOperationStatus_t *)
52545 fw_event->event_data;
52546- static struct _raid_device *raid_device;
52547+ struct _raid_device *raid_device;
52548 unsigned long flags;
52549 u16 handle;
52550
52551@@ -7112,7 +7112,7 @@ _scsih_scan_for_devices_after_reset(struct MPT2SAS_ADAPTER *ioc)
52552 u64 sas_address;
52553 struct _sas_device *sas_device;
52554 struct _sas_node *expander_device;
52555- static struct _raid_device *raid_device;
52556+ struct _raid_device *raid_device;
52557 u8 retry_count;
52558 unsigned long flags;
52559
52560diff --git a/drivers/scsi/pmcraid.c b/drivers/scsi/pmcraid.c
52561index ed31d8c..ab856b3 100644
52562--- a/drivers/scsi/pmcraid.c
52563+++ b/drivers/scsi/pmcraid.c
52564@@ -200,8 +200,8 @@ static int pmcraid_slave_alloc(struct scsi_device *scsi_dev)
52565 res->scsi_dev = scsi_dev;
52566 scsi_dev->hostdata = res;
52567 res->change_detected = 0;
52568- atomic_set(&res->read_failures, 0);
52569- atomic_set(&res->write_failures, 0);
52570+ atomic_set_unchecked(&res->read_failures, 0);
52571+ atomic_set_unchecked(&res->write_failures, 0);
52572 rc = 0;
52573 }
52574 spin_unlock_irqrestore(&pinstance->resource_lock, lock_flags);
52575@@ -2640,9 +2640,9 @@ static int pmcraid_error_handler(struct pmcraid_cmd *cmd)
52576
52577 /* If this was a SCSI read/write command keep count of errors */
52578 if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_READ_CMD)
52579- atomic_inc(&res->read_failures);
52580+ atomic_inc_unchecked(&res->read_failures);
52581 else if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_WRITE_CMD)
52582- atomic_inc(&res->write_failures);
52583+ atomic_inc_unchecked(&res->write_failures);
52584
52585 if (!RES_IS_GSCSI(res->cfg_entry) &&
52586 masked_ioasc != PMCRAID_IOASC_HW_DEVICE_BUS_STATUS_ERROR) {
52587@@ -3468,7 +3468,7 @@ static int pmcraid_queuecommand_lck(
52588 * block of scsi_cmd which is re-used (e.g. cancel/abort), which uses
52589 * hrrq_id assigned here in queuecommand
52590 */
52591- ioarcb->hrrq_id = atomic_add_return(1, &(pinstance->last_message_id)) %
52592+ ioarcb->hrrq_id = atomic_add_return_unchecked(1, &(pinstance->last_message_id)) %
52593 pinstance->num_hrrq;
52594 cmd->cmd_done = pmcraid_io_done;
52595
52596@@ -3782,7 +3782,7 @@ static long pmcraid_ioctl_passthrough(
52597 * block of scsi_cmd which is re-used (e.g. cancel/abort), which uses
52598 * hrrq_id assigned here in queuecommand
52599 */
52600- ioarcb->hrrq_id = atomic_add_return(1, &(pinstance->last_message_id)) %
52601+ ioarcb->hrrq_id = atomic_add_return_unchecked(1, &(pinstance->last_message_id)) %
52602 pinstance->num_hrrq;
52603
52604 if (request_size) {
52605@@ -4420,7 +4420,7 @@ static void pmcraid_worker_function(struct work_struct *workp)
52606
52607 pinstance = container_of(workp, struct pmcraid_instance, worker_q);
52608 /* add resources only after host is added into system */
52609- if (!atomic_read(&pinstance->expose_resources))
52610+ if (!atomic_read_unchecked(&pinstance->expose_resources))
52611 return;
52612
52613 fw_version = be16_to_cpu(pinstance->inq_data->fw_version);
52614@@ -5237,8 +5237,8 @@ static int pmcraid_init_instance(struct pci_dev *pdev, struct Scsi_Host *host,
52615 init_waitqueue_head(&pinstance->reset_wait_q);
52616
52617 atomic_set(&pinstance->outstanding_cmds, 0);
52618- atomic_set(&pinstance->last_message_id, 0);
52619- atomic_set(&pinstance->expose_resources, 0);
52620+ atomic_set_unchecked(&pinstance->last_message_id, 0);
52621+ atomic_set_unchecked(&pinstance->expose_resources, 0);
52622
52623 INIT_LIST_HEAD(&pinstance->free_res_q);
52624 INIT_LIST_HEAD(&pinstance->used_res_q);
52625@@ -5951,7 +5951,7 @@ static int pmcraid_probe(struct pci_dev *pdev,
52626 /* Schedule worker thread to handle CCN and take care of adding and
52627 * removing devices to OS
52628 */
52629- atomic_set(&pinstance->expose_resources, 1);
52630+ atomic_set_unchecked(&pinstance->expose_resources, 1);
52631 schedule_work(&pinstance->worker_q);
52632 return rc;
52633
52634diff --git a/drivers/scsi/pmcraid.h b/drivers/scsi/pmcraid.h
52635index e1d150f..6c6df44 100644
52636--- a/drivers/scsi/pmcraid.h
52637+++ b/drivers/scsi/pmcraid.h
52638@@ -748,7 +748,7 @@ struct pmcraid_instance {
52639 struct pmcraid_isr_param hrrq_vector[PMCRAID_NUM_MSIX_VECTORS];
52640
52641 /* Message id as filled in last fired IOARCB, used to identify HRRQ */
52642- atomic_t last_message_id;
52643+ atomic_unchecked_t last_message_id;
52644
52645 /* configuration table */
52646 struct pmcraid_config_table *cfg_table;
52647@@ -777,7 +777,7 @@ struct pmcraid_instance {
52648 atomic_t outstanding_cmds;
52649
52650 /* should add/delete resources to mid-layer now ?*/
52651- atomic_t expose_resources;
52652+ atomic_unchecked_t expose_resources;
52653
52654
52655
52656@@ -813,8 +813,8 @@ struct pmcraid_resource_entry {
52657 struct pmcraid_config_table_entry_ext cfg_entry_ext;
52658 };
52659 struct scsi_device *scsi_dev; /* Link scsi_device structure */
52660- atomic_t read_failures; /* count of failed READ commands */
52661- atomic_t write_failures; /* count of failed WRITE commands */
52662+ atomic_unchecked_t read_failures; /* count of failed READ commands */
52663+ atomic_unchecked_t write_failures; /* count of failed WRITE commands */
52664
52665 /* To indicate add/delete/modify during CCN */
52666 u8 change_detected;
52667diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
52668index 82b92c4..3178171 100644
52669--- a/drivers/scsi/qla2xxx/qla_attr.c
52670+++ b/drivers/scsi/qla2xxx/qla_attr.c
52671@@ -2192,7 +2192,7 @@ qla24xx_vport_disable(struct fc_vport *fc_vport, bool disable)
52672 return 0;
52673 }
52674
52675-struct fc_function_template qla2xxx_transport_functions = {
52676+fc_function_template_no_const qla2xxx_transport_functions = {
52677
52678 .show_host_node_name = 1,
52679 .show_host_port_name = 1,
52680@@ -2240,7 +2240,7 @@ struct fc_function_template qla2xxx_transport_functions = {
52681 .bsg_timeout = qla24xx_bsg_timeout,
52682 };
52683
52684-struct fc_function_template qla2xxx_transport_vport_functions = {
52685+fc_function_template_no_const qla2xxx_transport_vport_functions = {
52686
52687 .show_host_node_name = 1,
52688 .show_host_port_name = 1,
52689diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
52690index 7686bfe..4710893 100644
52691--- a/drivers/scsi/qla2xxx/qla_gbl.h
52692+++ b/drivers/scsi/qla2xxx/qla_gbl.h
52693@@ -571,8 +571,8 @@ extern void qla2x00_get_sym_node_name(scsi_qla_host_t *, uint8_t *, size_t);
52694 struct device_attribute;
52695 extern struct device_attribute *qla2x00_host_attrs[];
52696 struct fc_function_template;
52697-extern struct fc_function_template qla2xxx_transport_functions;
52698-extern struct fc_function_template qla2xxx_transport_vport_functions;
52699+extern fc_function_template_no_const qla2xxx_transport_functions;
52700+extern fc_function_template_no_const qla2xxx_transport_vport_functions;
52701 extern void qla2x00_alloc_sysfs_attr(scsi_qla_host_t *);
52702 extern void qla2x00_free_sysfs_attr(scsi_qla_host_t *, bool);
52703 extern void qla2x00_init_host_attr(scsi_qla_host_t *);
52704diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
52705index cce1cbc..5b9f0fe 100644
52706--- a/drivers/scsi/qla2xxx/qla_os.c
52707+++ b/drivers/scsi/qla2xxx/qla_os.c
52708@@ -1435,8 +1435,10 @@ qla2x00_config_dma_addressing(struct qla_hw_data *ha)
52709 !pci_set_consistent_dma_mask(ha->pdev, DMA_BIT_MASK(64))) {
52710 /* Ok, a 64bit DMA mask is applicable. */
52711 ha->flags.enable_64bit_addressing = 1;
52712- ha->isp_ops->calc_req_entries = qla2x00_calc_iocbs_64;
52713- ha->isp_ops->build_iocbs = qla2x00_build_scsi_iocbs_64;
52714+ pax_open_kernel();
52715+ *(void **)&ha->isp_ops->calc_req_entries = qla2x00_calc_iocbs_64;
52716+ *(void **)&ha->isp_ops->build_iocbs = qla2x00_build_scsi_iocbs_64;
52717+ pax_close_kernel();
52718 return;
52719 }
52720 }
52721diff --git a/drivers/scsi/qla4xxx/ql4_def.h b/drivers/scsi/qla4xxx/ql4_def.h
52722index 8f6d0fb..1b21097 100644
52723--- a/drivers/scsi/qla4xxx/ql4_def.h
52724+++ b/drivers/scsi/qla4xxx/ql4_def.h
52725@@ -305,7 +305,7 @@ struct ddb_entry {
52726 * (4000 only) */
52727 atomic_t relogin_timer; /* Max Time to wait for
52728 * relogin to complete */
52729- atomic_t relogin_retry_count; /* Num of times relogin has been
52730+ atomic_unchecked_t relogin_retry_count; /* Num of times relogin has been
52731 * retried */
52732 uint32_t default_time2wait; /* Default Min time between
52733 * relogins (+aens) */
52734diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
52735index 6d25879..3031a9f 100644
52736--- a/drivers/scsi/qla4xxx/ql4_os.c
52737+++ b/drivers/scsi/qla4xxx/ql4_os.c
52738@@ -4491,12 +4491,12 @@ static void qla4xxx_check_relogin_flash_ddb(struct iscsi_cls_session *cls_sess)
52739 */
52740 if (!iscsi_is_session_online(cls_sess)) {
52741 /* Reset retry relogin timer */
52742- atomic_inc(&ddb_entry->relogin_retry_count);
52743+ atomic_inc_unchecked(&ddb_entry->relogin_retry_count);
52744 DEBUG2(ql4_printk(KERN_INFO, ha,
52745 "%s: index[%d] relogin timed out-retrying"
52746 " relogin (%d), retry (%d)\n", __func__,
52747 ddb_entry->fw_ddb_index,
52748- atomic_read(&ddb_entry->relogin_retry_count),
52749+ atomic_read_unchecked(&ddb_entry->relogin_retry_count),
52750 ddb_entry->default_time2wait + 4));
52751 set_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags);
52752 atomic_set(&ddb_entry->retry_relogin_timer,
52753@@ -6604,7 +6604,7 @@ static void qla4xxx_setup_flash_ddb_entry(struct scsi_qla_host *ha,
52754
52755 atomic_set(&ddb_entry->retry_relogin_timer, INVALID_ENTRY);
52756 atomic_set(&ddb_entry->relogin_timer, 0);
52757- atomic_set(&ddb_entry->relogin_retry_count, 0);
52758+ atomic_set_unchecked(&ddb_entry->relogin_retry_count, 0);
52759 def_timeout = le16_to_cpu(ddb_entry->fw_ddb_entry.def_timeout);
52760 ddb_entry->default_relogin_timeout =
52761 (def_timeout > LOGIN_TOV) && (def_timeout < LOGIN_TOV * 10) ?
52762diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
52763index b1a2631..5bcd9c8 100644
52764--- a/drivers/scsi/scsi_lib.c
52765+++ b/drivers/scsi/scsi_lib.c
52766@@ -1597,7 +1597,7 @@ static void scsi_kill_request(struct request *req, struct request_queue *q)
52767 shost = sdev->host;
52768 scsi_init_cmd_errh(cmd);
52769 cmd->result = DID_NO_CONNECT << 16;
52770- atomic_inc(&cmd->device->iorequest_cnt);
52771+ atomic_inc_unchecked(&cmd->device->iorequest_cnt);
52772
52773 /*
52774 * SCSI request completion path will do scsi_device_unbusy(),
52775@@ -1620,9 +1620,9 @@ static void scsi_softirq_done(struct request *rq)
52776
52777 INIT_LIST_HEAD(&cmd->eh_entry);
52778
52779- atomic_inc(&cmd->device->iodone_cnt);
52780+ atomic_inc_unchecked(&cmd->device->iodone_cnt);
52781 if (cmd->result)
52782- atomic_inc(&cmd->device->ioerr_cnt);
52783+ atomic_inc_unchecked(&cmd->device->ioerr_cnt);
52784
52785 disposition = scsi_decide_disposition(cmd);
52786 if (disposition != SUCCESS &&
52787@@ -1663,7 +1663,7 @@ static int scsi_dispatch_cmd(struct scsi_cmnd *cmd)
52788 struct Scsi_Host *host = cmd->device->host;
52789 int rtn = 0;
52790
52791- atomic_inc(&cmd->device->iorequest_cnt);
52792+ atomic_inc_unchecked(&cmd->device->iorequest_cnt);
52793
52794 /* check if the device is still usable */
52795 if (unlikely(cmd->device->sdev_state == SDEV_DEL)) {
52796diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
52797index 1ac38e7..6acc656 100644
52798--- a/drivers/scsi/scsi_sysfs.c
52799+++ b/drivers/scsi/scsi_sysfs.c
52800@@ -788,7 +788,7 @@ show_iostat_##field(struct device *dev, struct device_attribute *attr, \
52801 char *buf) \
52802 { \
52803 struct scsi_device *sdev = to_scsi_device(dev); \
52804- unsigned long long count = atomic_read(&sdev->field); \
52805+ unsigned long long count = atomic_read_unchecked(&sdev->field); \
52806 return snprintf(buf, 20, "0x%llx\n", count); \
52807 } \
52808 static DEVICE_ATTR(field, S_IRUGO, show_iostat_##field, NULL)
52809diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
52810index 5d6f348..18778a6b 100644
52811--- a/drivers/scsi/scsi_transport_fc.c
52812+++ b/drivers/scsi/scsi_transport_fc.c
52813@@ -501,7 +501,7 @@ static DECLARE_TRANSPORT_CLASS(fc_vport_class,
52814 * Netlink Infrastructure
52815 */
52816
52817-static atomic_t fc_event_seq;
52818+static atomic_unchecked_t fc_event_seq;
52819
52820 /**
52821 * fc_get_event_number - Obtain the next sequential FC event number
52822@@ -514,7 +514,7 @@ static atomic_t fc_event_seq;
52823 u32
52824 fc_get_event_number(void)
52825 {
52826- return atomic_add_return(1, &fc_event_seq);
52827+ return atomic_add_return_unchecked(1, &fc_event_seq);
52828 }
52829 EXPORT_SYMBOL(fc_get_event_number);
52830
52831@@ -658,7 +658,7 @@ static __init int fc_transport_init(void)
52832 {
52833 int error;
52834
52835- atomic_set(&fc_event_seq, 0);
52836+ atomic_set_unchecked(&fc_event_seq, 0);
52837
52838 error = transport_class_register(&fc_host_class);
52839 if (error)
52840@@ -848,7 +848,7 @@ static int fc_str_to_dev_loss(const char *buf, unsigned long *val)
52841 char *cp;
52842
52843 *val = simple_strtoul(buf, &cp, 0);
52844- if ((*cp && (*cp != '\n')) || (*val < 0))
52845+ if (*cp && (*cp != '\n'))
52846 return -EINVAL;
52847 /*
52848 * Check for overflow; dev_loss_tmo is u32
52849diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
52850index 67d43e3..8cee73c 100644
52851--- a/drivers/scsi/scsi_transport_iscsi.c
52852+++ b/drivers/scsi/scsi_transport_iscsi.c
52853@@ -79,7 +79,7 @@ struct iscsi_internal {
52854 struct transport_container session_cont;
52855 };
52856
52857-static atomic_t iscsi_session_nr; /* sysfs session id for next new session */
52858+static atomic_unchecked_t iscsi_session_nr; /* sysfs session id for next new session */
52859 static struct workqueue_struct *iscsi_eh_timer_workq;
52860
52861 static DEFINE_IDA(iscsi_sess_ida);
52862@@ -2071,7 +2071,7 @@ int iscsi_add_session(struct iscsi_cls_session *session, unsigned int target_id)
52863 int err;
52864
52865 ihost = shost->shost_data;
52866- session->sid = atomic_add_return(1, &iscsi_session_nr);
52867+ session->sid = atomic_add_return_unchecked(1, &iscsi_session_nr);
52868
52869 if (target_id == ISCSI_MAX_TARGET) {
52870 id = ida_simple_get(&iscsi_sess_ida, 0, 0, GFP_KERNEL);
52871@@ -4515,7 +4515,7 @@ static __init int iscsi_transport_init(void)
52872 printk(KERN_INFO "Loading iSCSI transport class v%s.\n",
52873 ISCSI_TRANSPORT_VERSION);
52874
52875- atomic_set(&iscsi_session_nr, 0);
52876+ atomic_set_unchecked(&iscsi_session_nr, 0);
52877
52878 err = class_register(&iscsi_transport_class);
52879 if (err)
52880diff --git a/drivers/scsi/scsi_transport_srp.c b/drivers/scsi/scsi_transport_srp.c
52881index ae45bd9..c32a586 100644
52882--- a/drivers/scsi/scsi_transport_srp.c
52883+++ b/drivers/scsi/scsi_transport_srp.c
52884@@ -35,7 +35,7 @@
52885 #include "scsi_priv.h"
52886
52887 struct srp_host_attrs {
52888- atomic_t next_port_id;
52889+ atomic_unchecked_t next_port_id;
52890 };
52891 #define to_srp_host_attrs(host) ((struct srp_host_attrs *)(host)->shost_data)
52892
52893@@ -100,7 +100,7 @@ static int srp_host_setup(struct transport_container *tc, struct device *dev,
52894 struct Scsi_Host *shost = dev_to_shost(dev);
52895 struct srp_host_attrs *srp_host = to_srp_host_attrs(shost);
52896
52897- atomic_set(&srp_host->next_port_id, 0);
52898+ atomic_set_unchecked(&srp_host->next_port_id, 0);
52899 return 0;
52900 }
52901
52902@@ -734,7 +734,7 @@ struct srp_rport *srp_rport_add(struct Scsi_Host *shost,
52903 rport_fast_io_fail_timedout);
52904 INIT_DELAYED_WORK(&rport->dev_loss_work, rport_dev_loss_timedout);
52905
52906- id = atomic_inc_return(&to_srp_host_attrs(shost)->next_port_id);
52907+ id = atomic_inc_return_unchecked(&to_srp_host_attrs(shost)->next_port_id);
52908 dev_set_name(&rport->dev, "port-%d:%d", shost->host_no, id);
52909
52910 transport_setup_device(&rport->dev);
52911diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
52912index 3290a3e..d65ac1c 100644
52913--- a/drivers/scsi/sd.c
52914+++ b/drivers/scsi/sd.c
52915@@ -3006,7 +3006,7 @@ static int sd_probe(struct device *dev)
52916 sdkp->disk = gd;
52917 sdkp->index = index;
52918 atomic_set(&sdkp->openers, 0);
52919- atomic_set(&sdkp->device->ioerr_cnt, 0);
52920+ atomic_set_unchecked(&sdkp->device->ioerr_cnt, 0);
52921
52922 if (!sdp->request_queue->rq_timeout) {
52923 if (sdp->type != TYPE_MOD)
52924diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
52925index 2270bd5..98408a5 100644
52926--- a/drivers/scsi/sg.c
52927+++ b/drivers/scsi/sg.c
52928@@ -1083,7 +1083,7 @@ sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg)
52929 sdp->disk->disk_name,
52930 MKDEV(SCSI_GENERIC_MAJOR, sdp->index),
52931 NULL,
52932- (char *)arg);
52933+ (char __user *)arg);
52934 case BLKTRACESTART:
52935 return blk_trace_startstop(sdp->device->request_queue, 1);
52936 case BLKTRACESTOP:
52937diff --git a/drivers/soc/tegra/fuse/fuse-tegra.c b/drivers/soc/tegra/fuse/fuse-tegra.c
52938index c0d660f..24a5854 100644
52939--- a/drivers/soc/tegra/fuse/fuse-tegra.c
52940+++ b/drivers/soc/tegra/fuse/fuse-tegra.c
52941@@ -71,7 +71,7 @@ static ssize_t fuse_read(struct file *fd, struct kobject *kobj,
52942 return i;
52943 }
52944
52945-static struct bin_attribute fuse_bin_attr = {
52946+static bin_attribute_no_const fuse_bin_attr = {
52947 .attr = { .name = "fuse", .mode = S_IRUGO, },
52948 .read = fuse_read,
52949 };
52950diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
52951index 57a1950..ae54e21 100644
52952--- a/drivers/spi/spi.c
52953+++ b/drivers/spi/spi.c
52954@@ -2307,7 +2307,7 @@ int spi_bus_unlock(struct spi_master *master)
52955 EXPORT_SYMBOL_GPL(spi_bus_unlock);
52956
52957 /* portable code must never pass more than 32 bytes */
52958-#define SPI_BUFSIZ max(32, SMP_CACHE_BYTES)
52959+#define SPI_BUFSIZ max(32UL, SMP_CACHE_BYTES)
52960
52961 static u8 *buf;
52962
52963diff --git a/drivers/staging/android/timed_output.c b/drivers/staging/android/timed_output.c
52964index b41429f..2de5373 100644
52965--- a/drivers/staging/android/timed_output.c
52966+++ b/drivers/staging/android/timed_output.c
52967@@ -25,7 +25,7 @@
52968 #include "timed_output.h"
52969
52970 static struct class *timed_output_class;
52971-static atomic_t device_count;
52972+static atomic_unchecked_t device_count;
52973
52974 static ssize_t enable_show(struct device *dev, struct device_attribute *attr,
52975 char *buf)
52976@@ -65,7 +65,7 @@ static int create_timed_output_class(void)
52977 timed_output_class = class_create(THIS_MODULE, "timed_output");
52978 if (IS_ERR(timed_output_class))
52979 return PTR_ERR(timed_output_class);
52980- atomic_set(&device_count, 0);
52981+ atomic_set_unchecked(&device_count, 0);
52982 timed_output_class->dev_groups = timed_output_groups;
52983 }
52984
52985@@ -83,7 +83,7 @@ int timed_output_dev_register(struct timed_output_dev *tdev)
52986 if (ret < 0)
52987 return ret;
52988
52989- tdev->index = atomic_inc_return(&device_count);
52990+ tdev->index = atomic_inc_return_unchecked(&device_count);
52991 tdev->dev = device_create(timed_output_class, NULL,
52992 MKDEV(0, tdev->index), NULL, "%s", tdev->name);
52993 if (IS_ERR(tdev->dev))
52994diff --git a/drivers/staging/comedi/comedi_fops.c b/drivers/staging/comedi/comedi_fops.c
52995index 727640e..55bf61c 100644
52996--- a/drivers/staging/comedi/comedi_fops.c
52997+++ b/drivers/staging/comedi/comedi_fops.c
52998@@ -297,8 +297,8 @@ static void comedi_file_reset(struct file *file)
52999 }
53000 cfp->last_attached = dev->attached;
53001 cfp->last_detach_count = dev->detach_count;
53002- ACCESS_ONCE(cfp->read_subdev) = read_s;
53003- ACCESS_ONCE(cfp->write_subdev) = write_s;
53004+ ACCESS_ONCE_RW(cfp->read_subdev) = read_s;
53005+ ACCESS_ONCE_RW(cfp->write_subdev) = write_s;
53006 }
53007
53008 static void comedi_file_check(struct file *file)
53009@@ -1924,7 +1924,7 @@ static int do_setrsubd_ioctl(struct comedi_device *dev, unsigned long arg,
53010 !(s_old->async->cmd.flags & CMDF_WRITE))
53011 return -EBUSY;
53012
53013- ACCESS_ONCE(cfp->read_subdev) = s_new;
53014+ ACCESS_ONCE_RW(cfp->read_subdev) = s_new;
53015 return 0;
53016 }
53017
53018@@ -1966,7 +1966,7 @@ static int do_setwsubd_ioctl(struct comedi_device *dev, unsigned long arg,
53019 (s_old->async->cmd.flags & CMDF_WRITE))
53020 return -EBUSY;
53021
53022- ACCESS_ONCE(cfp->write_subdev) = s_new;
53023+ ACCESS_ONCE_RW(cfp->write_subdev) = s_new;
53024 return 0;
53025 }
53026
53027diff --git a/drivers/staging/fbtft/fbtft-core.c b/drivers/staging/fbtft/fbtft-core.c
53028index 37dcf7e..f3c2016 100644
53029--- a/drivers/staging/fbtft/fbtft-core.c
53030+++ b/drivers/staging/fbtft/fbtft-core.c
53031@@ -689,7 +689,7 @@ struct fb_info *fbtft_framebuffer_alloc(struct fbtft_display *display,
53032 {
53033 struct fb_info *info;
53034 struct fbtft_par *par;
53035- struct fb_ops *fbops = NULL;
53036+ fb_ops_no_const *fbops = NULL;
53037 struct fb_deferred_io *fbdefio = NULL;
53038 struct fbtft_platform_data *pdata = dev->platform_data;
53039 u8 *vmem = NULL;
53040diff --git a/drivers/staging/fbtft/fbtft.h b/drivers/staging/fbtft/fbtft.h
53041index 0dbf3f9..fed0063 100644
53042--- a/drivers/staging/fbtft/fbtft.h
53043+++ b/drivers/staging/fbtft/fbtft.h
53044@@ -106,7 +106,7 @@ struct fbtft_ops {
53045
53046 int (*set_var)(struct fbtft_par *par);
53047 int (*set_gamma)(struct fbtft_par *par, unsigned long *curves);
53048-};
53049+} __no_const;
53050
53051 /**
53052 * struct fbtft_display - Describes the display properties
53053diff --git a/drivers/staging/gdm724x/gdm_tty.c b/drivers/staging/gdm724x/gdm_tty.c
53054index 001348c..cfaac8a 100644
53055--- a/drivers/staging/gdm724x/gdm_tty.c
53056+++ b/drivers/staging/gdm724x/gdm_tty.c
53057@@ -44,7 +44,7 @@
53058 #define gdm_tty_send_control(n, r, v, d, l) (\
53059 n->tty_dev->send_control(n->tty_dev->priv_dev, r, v, d, l))
53060
53061-#define GDM_TTY_READY(gdm) (gdm && gdm->tty_dev && gdm->port.count)
53062+#define GDM_TTY_READY(gdm) (gdm && gdm->tty_dev && atomic_read(&gdm->port.count))
53063
53064 static struct tty_driver *gdm_driver[TTY_MAX_COUNT];
53065 static struct gdm *gdm_table[TTY_MAX_COUNT][GDM_TTY_MINOR];
53066diff --git a/drivers/staging/i2o/i2o.h b/drivers/staging/i2o/i2o.h
53067index d23c3c2..eb63c81 100644
53068--- a/drivers/staging/i2o/i2o.h
53069+++ b/drivers/staging/i2o/i2o.h
53070@@ -565,7 +565,7 @@ struct i2o_controller {
53071 struct i2o_device *exec; /* Executive */
53072 #if BITS_PER_LONG == 64
53073 spinlock_t context_list_lock; /* lock for context_list */
53074- atomic_t context_list_counter; /* needed for unique contexts */
53075+ atomic_unchecked_t context_list_counter; /* needed for unique contexts */
53076 struct list_head context_list; /* list of context id's
53077 and pointers */
53078 #endif
53079diff --git a/drivers/staging/i2o/i2o_proc.c b/drivers/staging/i2o/i2o_proc.c
53080index ad84f33..c5bdf65 100644
53081--- a/drivers/staging/i2o/i2o_proc.c
53082+++ b/drivers/staging/i2o/i2o_proc.c
53083@@ -255,12 +255,6 @@ static char *scsi_devices[] = {
53084 "Array Controller Device"
53085 };
53086
53087-static char *chtostr(char *tmp, u8 *chars, int n)
53088-{
53089- tmp[0] = 0;
53090- return strncat(tmp, (char *)chars, n);
53091-}
53092-
53093 static int i2o_report_query_status(struct seq_file *seq, int block_status,
53094 char *group)
53095 {
53096@@ -707,9 +701,9 @@ static int i2o_seq_show_status(struct seq_file *seq, void *v)
53097 static int i2o_seq_show_hw(struct seq_file *seq, void *v)
53098 {
53099 struct i2o_controller *c = (struct i2o_controller *)seq->private;
53100- static u32 work32[5];
53101- static u8 *work8 = (u8 *) work32;
53102- static u16 *work16 = (u16 *) work32;
53103+ u32 work32[5];
53104+ u8 *work8 = (u8 *) work32;
53105+ u16 *work16 = (u16 *) work32;
53106 int token;
53107 u32 hwcap;
53108
53109@@ -790,7 +784,6 @@ static int i2o_seq_show_ddm_table(struct seq_file *seq, void *v)
53110 } *result;
53111
53112 i2o_exec_execute_ddm_table ddm_table;
53113- char tmp[28 + 1];
53114
53115 result = kmalloc(sizeof(*result), GFP_KERNEL);
53116 if (!result)
53117@@ -825,8 +818,7 @@ static int i2o_seq_show_ddm_table(struct seq_file *seq, void *v)
53118
53119 seq_printf(seq, "%-#7x", ddm_table.i2o_vendor_id);
53120 seq_printf(seq, "%-#8x", ddm_table.module_id);
53121- seq_printf(seq, "%-29s",
53122- chtostr(tmp, ddm_table.module_name_version, 28));
53123+ seq_printf(seq, "%-.28s", ddm_table.module_name_version);
53124 seq_printf(seq, "%9d ", ddm_table.data_size);
53125 seq_printf(seq, "%8d", ddm_table.code_size);
53126
53127@@ -893,7 +885,6 @@ static int i2o_seq_show_drivers_stored(struct seq_file *seq, void *v)
53128
53129 i2o_driver_result_table *result;
53130 i2o_driver_store_table *dst;
53131- char tmp[28 + 1];
53132
53133 result = kmalloc(sizeof(i2o_driver_result_table), GFP_KERNEL);
53134 if (result == NULL)
53135@@ -928,9 +919,8 @@ static int i2o_seq_show_drivers_stored(struct seq_file *seq, void *v)
53136
53137 seq_printf(seq, "%-#7x", dst->i2o_vendor_id);
53138 seq_printf(seq, "%-#8x", dst->module_id);
53139- seq_printf(seq, "%-29s",
53140- chtostr(tmp, dst->module_name_version, 28));
53141- seq_printf(seq, "%-9s", chtostr(tmp, dst->date, 8));
53142+ seq_printf(seq, "%-.28s", dst->module_name_version);
53143+ seq_printf(seq, "%-.8s", dst->date);
53144 seq_printf(seq, "%8d ", dst->module_size);
53145 seq_printf(seq, "%8d ", dst->mpb_size);
53146 seq_printf(seq, "0x%04x", dst->module_flags);
53147@@ -1246,11 +1236,10 @@ static int i2o_seq_show_authorized_users(struct seq_file *seq, void *v)
53148 static int i2o_seq_show_dev_identity(struct seq_file *seq, void *v)
53149 {
53150 struct i2o_device *d = (struct i2o_device *)seq->private;
53151- static u32 work32[128]; // allow for "stuff" + up to 256 byte (max) serial number
53152+ u32 work32[128]; // allow for "stuff" + up to 256 byte (max) serial number
53153 // == (allow) 512d bytes (max)
53154- static u16 *work16 = (u16 *) work32;
53155+ u16 *work16 = (u16 *) work32;
53156 int token;
53157- char tmp[16 + 1];
53158
53159 token = i2o_parm_field_get(d, 0xF100, -1, &work32, sizeof(work32));
53160
53161@@ -1262,14 +1251,10 @@ static int i2o_seq_show_dev_identity(struct seq_file *seq, void *v)
53162 seq_printf(seq, "Device Class : %s\n", i2o_get_class_name(work16[0]));
53163 seq_printf(seq, "Owner TID : %0#5x\n", work16[2]);
53164 seq_printf(seq, "Parent TID : %0#5x\n", work16[3]);
53165- seq_printf(seq, "Vendor info : %s\n",
53166- chtostr(tmp, (u8 *) (work32 + 2), 16));
53167- seq_printf(seq, "Product info : %s\n",
53168- chtostr(tmp, (u8 *) (work32 + 6), 16));
53169- seq_printf(seq, "Description : %s\n",
53170- chtostr(tmp, (u8 *) (work32 + 10), 16));
53171- seq_printf(seq, "Product rev. : %s\n",
53172- chtostr(tmp, (u8 *) (work32 + 14), 8));
53173+ seq_printf(seq, "Vendor info : %.16s\n", (u8 *) (work32 + 2));
53174+ seq_printf(seq, "Product info : %.16s\n", (u8 *) (work32 + 6));
53175+ seq_printf(seq, "Description : %.16s\n", (u8 *) (work32 + 10));
53176+ seq_printf(seq, "Product rev. : %.8s\n", (u8 *) (work32 + 14));
53177
53178 seq_printf(seq, "Serial number : ");
53179 print_serial_number(seq, (u8 *) (work32 + 16),
53180@@ -1306,8 +1291,6 @@ static int i2o_seq_show_ddm_identity(struct seq_file *seq, void *v)
53181 u8 pad[256]; // allow up to 256 byte (max) serial number
53182 } result;
53183
53184- char tmp[24 + 1];
53185-
53186 token = i2o_parm_field_get(d, 0xF101, -1, &result, sizeof(result));
53187
53188 if (token < 0) {
53189@@ -1316,10 +1299,8 @@ static int i2o_seq_show_ddm_identity(struct seq_file *seq, void *v)
53190 }
53191
53192 seq_printf(seq, "Registering DDM TID : 0x%03x\n", result.ddm_tid);
53193- seq_printf(seq, "Module name : %s\n",
53194- chtostr(tmp, result.module_name, 24));
53195- seq_printf(seq, "Module revision : %s\n",
53196- chtostr(tmp, result.module_rev, 8));
53197+ seq_printf(seq, "Module name : %.24s\n", result.module_name);
53198+ seq_printf(seq, "Module revision : %.8s\n", result.module_rev);
53199
53200 seq_printf(seq, "Serial number : ");
53201 print_serial_number(seq, result.serial_number, sizeof(result) - 36);
53202@@ -1343,8 +1324,6 @@ static int i2o_seq_show_uinfo(struct seq_file *seq, void *v)
53203 u8 instance_number[4];
53204 } result;
53205
53206- char tmp[64 + 1];
53207-
53208 token = i2o_parm_field_get(d, 0xF102, -1, &result, sizeof(result));
53209
53210 if (token < 0) {
53211@@ -1352,14 +1331,10 @@ static int i2o_seq_show_uinfo(struct seq_file *seq, void *v)
53212 return 0;
53213 }
53214
53215- seq_printf(seq, "Device name : %s\n",
53216- chtostr(tmp, result.device_name, 64));
53217- seq_printf(seq, "Service name : %s\n",
53218- chtostr(tmp, result.service_name, 64));
53219- seq_printf(seq, "Physical name : %s\n",
53220- chtostr(tmp, result.physical_location, 64));
53221- seq_printf(seq, "Instance number : %s\n",
53222- chtostr(tmp, result.instance_number, 4));
53223+ seq_printf(seq, "Device name : %.64s\n", result.device_name);
53224+ seq_printf(seq, "Service name : %.64s\n", result.service_name);
53225+ seq_printf(seq, "Physical name : %.64s\n", result.physical_location);
53226+ seq_printf(seq, "Instance number : %.4s\n", result.instance_number);
53227
53228 return 0;
53229 }
53230@@ -1368,9 +1343,9 @@ static int i2o_seq_show_uinfo(struct seq_file *seq, void *v)
53231 static int i2o_seq_show_sgl_limits(struct seq_file *seq, void *v)
53232 {
53233 struct i2o_device *d = (struct i2o_device *)seq->private;
53234- static u32 work32[12];
53235- static u16 *work16 = (u16 *) work32;
53236- static u8 *work8 = (u8 *) work32;
53237+ u32 work32[12];
53238+ u16 *work16 = (u16 *) work32;
53239+ u8 *work8 = (u8 *) work32;
53240 int token;
53241
53242 token = i2o_parm_field_get(d, 0xF103, -1, &work32, sizeof(work32));
53243diff --git a/drivers/staging/i2o/iop.c b/drivers/staging/i2o/iop.c
53244index 52334fc..d7f40b3 100644
53245--- a/drivers/staging/i2o/iop.c
53246+++ b/drivers/staging/i2o/iop.c
53247@@ -111,10 +111,10 @@ u32 i2o_cntxt_list_add(struct i2o_controller * c, void *ptr)
53248
53249 spin_lock_irqsave(&c->context_list_lock, flags);
53250
53251- if (unlikely(atomic_inc_and_test(&c->context_list_counter)))
53252- atomic_inc(&c->context_list_counter);
53253+ if (unlikely(atomic_inc_and_test_unchecked(&c->context_list_counter)))
53254+ atomic_inc_unchecked(&c->context_list_counter);
53255
53256- entry->context = atomic_read(&c->context_list_counter);
53257+ entry->context = atomic_read_unchecked(&c->context_list_counter);
53258
53259 list_add(&entry->list, &c->context_list);
53260
53261@@ -1076,7 +1076,7 @@ struct i2o_controller *i2o_iop_alloc(void)
53262
53263 #if BITS_PER_LONG == 64
53264 spin_lock_init(&c->context_list_lock);
53265- atomic_set(&c->context_list_counter, 0);
53266+ atomic_set_unchecked(&c->context_list_counter, 0);
53267 INIT_LIST_HEAD(&c->context_list);
53268 #endif
53269
53270diff --git a/drivers/staging/lustre/lnet/selftest/brw_test.c b/drivers/staging/lustre/lnet/selftest/brw_test.c
53271index 463da07..e791ce9 100644
53272--- a/drivers/staging/lustre/lnet/selftest/brw_test.c
53273+++ b/drivers/staging/lustre/lnet/selftest/brw_test.c
53274@@ -488,13 +488,11 @@ brw_server_handle(struct srpc_server_rpc *rpc)
53275 return 0;
53276 }
53277
53278-sfw_test_client_ops_t brw_test_client;
53279-void brw_init_test_client(void)
53280-{
53281- brw_test_client.tso_init = brw_client_init;
53282- brw_test_client.tso_fini = brw_client_fini;
53283- brw_test_client.tso_prep_rpc = brw_client_prep_rpc;
53284- brw_test_client.tso_done_rpc = brw_client_done_rpc;
53285+sfw_test_client_ops_t brw_test_client = {
53286+ .tso_init = brw_client_init,
53287+ .tso_fini = brw_client_fini,
53288+ .tso_prep_rpc = brw_client_prep_rpc,
53289+ .tso_done_rpc = brw_client_done_rpc,
53290 };
53291
53292 srpc_service_t brw_test_service;
53293diff --git a/drivers/staging/lustre/lnet/selftest/framework.c b/drivers/staging/lustre/lnet/selftest/framework.c
53294index 5709148..ccd9e0d 100644
53295--- a/drivers/staging/lustre/lnet/selftest/framework.c
53296+++ b/drivers/staging/lustre/lnet/selftest/framework.c
53297@@ -1628,12 +1628,10 @@ static srpc_service_t sfw_services[] = {
53298
53299 extern sfw_test_client_ops_t ping_test_client;
53300 extern srpc_service_t ping_test_service;
53301-extern void ping_init_test_client(void);
53302 extern void ping_init_test_service(void);
53303
53304 extern sfw_test_client_ops_t brw_test_client;
53305 extern srpc_service_t brw_test_service;
53306-extern void brw_init_test_client(void);
53307 extern void brw_init_test_service(void);
53308
53309
53310@@ -1675,12 +1673,10 @@ sfw_startup (void)
53311 INIT_LIST_HEAD(&sfw_data.fw_zombie_rpcs);
53312 INIT_LIST_HEAD(&sfw_data.fw_zombie_sessions);
53313
53314- brw_init_test_client();
53315 brw_init_test_service();
53316 rc = sfw_register_test(&brw_test_service, &brw_test_client);
53317 LASSERT (rc == 0);
53318
53319- ping_init_test_client();
53320 ping_init_test_service();
53321 rc = sfw_register_test(&ping_test_service, &ping_test_client);
53322 LASSERT (rc == 0);
53323diff --git a/drivers/staging/lustre/lnet/selftest/ping_test.c b/drivers/staging/lustre/lnet/selftest/ping_test.c
53324index d8c0df6..5041cbb 100644
53325--- a/drivers/staging/lustre/lnet/selftest/ping_test.c
53326+++ b/drivers/staging/lustre/lnet/selftest/ping_test.c
53327@@ -211,14 +211,12 @@ ping_server_handle(struct srpc_server_rpc *rpc)
53328 return 0;
53329 }
53330
53331-sfw_test_client_ops_t ping_test_client;
53332-void ping_init_test_client(void)
53333-{
53334- ping_test_client.tso_init = ping_client_init;
53335- ping_test_client.tso_fini = ping_client_fini;
53336- ping_test_client.tso_prep_rpc = ping_client_prep_rpc;
53337- ping_test_client.tso_done_rpc = ping_client_done_rpc;
53338-}
53339+sfw_test_client_ops_t ping_test_client = {
53340+ .tso_init = ping_client_init,
53341+ .tso_fini = ping_client_fini,
53342+ .tso_prep_rpc = ping_client_prep_rpc,
53343+ .tso_done_rpc = ping_client_done_rpc,
53344+};
53345
53346 srpc_service_t ping_test_service;
53347 void ping_init_test_service(void)
53348diff --git a/drivers/staging/lustre/lustre/include/lustre_dlm.h b/drivers/staging/lustre/lustre/include/lustre_dlm.h
53349index 83bc0a9..12ba00a 100644
53350--- a/drivers/staging/lustre/lustre/include/lustre_dlm.h
53351+++ b/drivers/staging/lustre/lustre/include/lustre_dlm.h
53352@@ -1139,7 +1139,7 @@ struct ldlm_callback_suite {
53353 ldlm_completion_callback lcs_completion;
53354 ldlm_blocking_callback lcs_blocking;
53355 ldlm_glimpse_callback lcs_glimpse;
53356-};
53357+} __no_const;
53358
53359 /* ldlm_lockd.c */
53360 int ldlm_del_waiting_lock(struct ldlm_lock *lock);
53361diff --git a/drivers/staging/lustre/lustre/include/obd.h b/drivers/staging/lustre/lustre/include/obd.h
53362index 2a88b80..62e7e5f 100644
53363--- a/drivers/staging/lustre/lustre/include/obd.h
53364+++ b/drivers/staging/lustre/lustre/include/obd.h
53365@@ -1362,7 +1362,7 @@ struct md_ops {
53366 * lprocfs_alloc_md_stats() in obdclass/lprocfs_status.c. Also, add a
53367 * wrapper function in include/linux/obd_class.h.
53368 */
53369-};
53370+} __no_const;
53371
53372 struct lsm_operations {
53373 void (*lsm_free)(struct lov_stripe_md *);
53374diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_flock.c b/drivers/staging/lustre/lustre/ldlm/ldlm_flock.c
53375index a4c252f..b21acac 100644
53376--- a/drivers/staging/lustre/lustre/ldlm/ldlm_flock.c
53377+++ b/drivers/staging/lustre/lustre/ldlm/ldlm_flock.c
53378@@ -258,7 +258,7 @@ ldlm_process_flock_lock(struct ldlm_lock *req, __u64 *flags, int first_enq,
53379 int added = (mode == LCK_NL);
53380 int overlaps = 0;
53381 int splitted = 0;
53382- const struct ldlm_callback_suite null_cbs = { NULL };
53383+ const struct ldlm_callback_suite null_cbs = { };
53384
53385 CDEBUG(D_DLMTRACE,
53386 "flags %#llx owner %llu pid %u mode %u start %llu end %llu\n",
53387diff --git a/drivers/staging/lustre/lustre/libcfs/linux/linux-proc.c b/drivers/staging/lustre/lustre/libcfs/linux/linux-proc.c
53388index c539e37..743b213 100644
53389--- a/drivers/staging/lustre/lustre/libcfs/linux/linux-proc.c
53390+++ b/drivers/staging/lustre/lustre/libcfs/linux/linux-proc.c
53391@@ -237,7 +237,7 @@ static int proc_console_max_delay_cs(struct ctl_table *table, int write,
53392 loff_t *ppos)
53393 {
53394 int rc, max_delay_cs;
53395- struct ctl_table dummy = *table;
53396+ ctl_table_no_const dummy = *table;
53397 long d;
53398
53399 dummy.data = &max_delay_cs;
53400@@ -270,7 +270,7 @@ static int proc_console_min_delay_cs(struct ctl_table *table, int write,
53401 loff_t *ppos)
53402 {
53403 int rc, min_delay_cs;
53404- struct ctl_table dummy = *table;
53405+ ctl_table_no_const dummy = *table;
53406 long d;
53407
53408 dummy.data = &min_delay_cs;
53409@@ -302,7 +302,7 @@ static int proc_console_backoff(struct ctl_table *table, int write,
53410 void __user *buffer, size_t *lenp, loff_t *ppos)
53411 {
53412 int rc, backoff;
53413- struct ctl_table dummy = *table;
53414+ ctl_table_no_const dummy = *table;
53415
53416 dummy.data = &backoff;
53417 dummy.proc_handler = &proc_dointvec;
53418diff --git a/drivers/staging/lustre/lustre/libcfs/module.c b/drivers/staging/lustre/lustre/libcfs/module.c
53419index 7dc77dd..289d03e 100644
53420--- a/drivers/staging/lustre/lustre/libcfs/module.c
53421+++ b/drivers/staging/lustre/lustre/libcfs/module.c
53422@@ -313,11 +313,11 @@ out:
53423
53424
53425 struct cfs_psdev_ops libcfs_psdev_ops = {
53426- libcfs_psdev_open,
53427- libcfs_psdev_release,
53428- NULL,
53429- NULL,
53430- libcfs_ioctl
53431+ .p_open = libcfs_psdev_open,
53432+ .p_close = libcfs_psdev_release,
53433+ .p_read = NULL,
53434+ .p_write = NULL,
53435+ .p_ioctl = libcfs_ioctl
53436 };
53437
53438 extern int insert_proc(void);
53439diff --git a/drivers/staging/octeon/ethernet-rx.c b/drivers/staging/octeon/ethernet-rx.c
53440index 22667db..8b703b6 100644
53441--- a/drivers/staging/octeon/ethernet-rx.c
53442+++ b/drivers/staging/octeon/ethernet-rx.c
53443@@ -354,14 +354,14 @@ static int cvm_oct_napi_poll(struct napi_struct *napi, int budget)
53444 /* Increment RX stats for virtual ports */
53445 if (work->ipprt >= CVMX_PIP_NUM_INPUT_PORTS) {
53446 #ifdef CONFIG_64BIT
53447- atomic64_add(1,
53448+ atomic64_add_unchecked(1,
53449 (atomic64_t *)&priv->stats.rx_packets);
53450- atomic64_add(skb->len,
53451+ atomic64_add_unchecked(skb->len,
53452 (atomic64_t *)&priv->stats.rx_bytes);
53453 #else
53454- atomic_add(1,
53455+ atomic_add_unchecked(1,
53456 (atomic_t *)&priv->stats.rx_packets);
53457- atomic_add(skb->len,
53458+ atomic_add_unchecked(skb->len,
53459 (atomic_t *)&priv->stats.rx_bytes);
53460 #endif
53461 }
53462@@ -373,10 +373,10 @@ static int cvm_oct_napi_poll(struct napi_struct *napi, int budget)
53463 dev->name);
53464 */
53465 #ifdef CONFIG_64BIT
53466- atomic64_add(1,
53467+ atomic64_add_unchecked(1,
53468 (atomic64_t *)&priv->stats.rx_dropped);
53469 #else
53470- atomic_add(1,
53471+ atomic_add_unchecked(1,
53472 (atomic_t *)&priv->stats.rx_dropped);
53473 #endif
53474 dev_kfree_skb_irq(skb);
53475diff --git a/drivers/staging/octeon/ethernet.c b/drivers/staging/octeon/ethernet.c
53476index 460e854..f926452 100644
53477--- a/drivers/staging/octeon/ethernet.c
53478+++ b/drivers/staging/octeon/ethernet.c
53479@@ -241,11 +241,11 @@ static struct net_device_stats *cvm_oct_common_get_stats(struct net_device *dev)
53480 * since the RX tasklet also increments it.
53481 */
53482 #ifdef CONFIG_64BIT
53483- atomic64_add(rx_status.dropped_packets,
53484- (atomic64_t *)&priv->stats.rx_dropped);
53485+ atomic64_add_unchecked(rx_status.dropped_packets,
53486+ (atomic64_unchecked_t *)&priv->stats.rx_dropped);
53487 #else
53488- atomic_add(rx_status.dropped_packets,
53489- (atomic_t *)&priv->stats.rx_dropped);
53490+ atomic_add_unchecked(rx_status.dropped_packets,
53491+ (atomic_unchecked_t *)&priv->stats.rx_dropped);
53492 #endif
53493 }
53494
53495diff --git a/drivers/staging/rtl8188eu/include/hal_intf.h b/drivers/staging/rtl8188eu/include/hal_intf.h
53496index 3b476d8..f522d68 100644
53497--- a/drivers/staging/rtl8188eu/include/hal_intf.h
53498+++ b/drivers/staging/rtl8188eu/include/hal_intf.h
53499@@ -225,7 +225,7 @@ struct hal_ops {
53500
53501 void (*hal_notch_filter)(struct adapter *adapter, bool enable);
53502 void (*hal_reset_security_engine)(struct adapter *adapter);
53503-};
53504+} __no_const;
53505
53506 enum rt_eeprom_type {
53507 EEPROM_93C46,
53508diff --git a/drivers/staging/rtl8712/rtl871x_io.h b/drivers/staging/rtl8712/rtl871x_io.h
53509index 070cc03..6806e37 100644
53510--- a/drivers/staging/rtl8712/rtl871x_io.h
53511+++ b/drivers/staging/rtl8712/rtl871x_io.h
53512@@ -108,7 +108,7 @@ struct _io_ops {
53513 u8 *pmem);
53514 u32 (*_write_port)(struct intf_hdl *pintfhdl, u32 addr, u32 cnt,
53515 u8 *pmem);
53516-};
53517+} __no_const;
53518
53519 struct io_req {
53520 struct list_head list;
53521diff --git a/drivers/staging/unisys/visorchipset/visorchipset.h b/drivers/staging/unisys/visorchipset/visorchipset.h
53522index 98f3ba4..c6a7fce 100644
53523--- a/drivers/staging/unisys/visorchipset/visorchipset.h
53524+++ b/drivers/staging/unisys/visorchipset/visorchipset.h
53525@@ -171,7 +171,7 @@ struct visorchipset_busdev_notifiers {
53526 void (*device_resume)(ulong bus_no, ulong dev_no);
53527 int (*get_channel_info)(uuid_le type_uuid, ulong *min_size,
53528 ulong *max_size);
53529-};
53530+} __no_const;
53531
53532 /* These functions live inside visorchipset, and will be called to indicate
53533 * responses to specific events (by code outside of visorchipset).
53534@@ -186,7 +186,7 @@ struct visorchipset_busdev_responders {
53535 void (*device_destroy)(ulong bus_no, ulong dev_no, int response);
53536 void (*device_pause)(ulong bus_no, ulong dev_no, int response);
53537 void (*device_resume)(ulong bus_no, ulong dev_no, int response);
53538-};
53539+} __no_const;
53540
53541 /** Register functions (in the bus driver) to get called by visorchipset
53542 * whenever a bus or device appears for which this service partition is
53543diff --git a/drivers/target/sbp/sbp_target.c b/drivers/target/sbp/sbp_target.c
53544index 9512af6..045bf5a 100644
53545--- a/drivers/target/sbp/sbp_target.c
53546+++ b/drivers/target/sbp/sbp_target.c
53547@@ -62,7 +62,7 @@ static const u32 sbp_unit_directory_template[] = {
53548
53549 #define SESSION_MAINTENANCE_INTERVAL HZ
53550
53551-static atomic_t login_id = ATOMIC_INIT(0);
53552+static atomic_unchecked_t login_id = ATOMIC_INIT(0);
53553
53554 static void session_maintenance_work(struct work_struct *);
53555 static int sbp_run_transaction(struct fw_card *, int, int, int, int,
53556@@ -444,7 +444,7 @@ static void sbp_management_request_login(
53557 login->lun = se_lun;
53558 login->status_fifo_addr = sbp2_pointer_to_addr(&req->orb.status_fifo);
53559 login->exclusive = LOGIN_ORB_EXCLUSIVE(be32_to_cpu(req->orb.misc));
53560- login->login_id = atomic_inc_return(&login_id);
53561+ login->login_id = atomic_inc_return_unchecked(&login_id);
53562
53563 login->tgt_agt = sbp_target_agent_register(login);
53564 if (IS_ERR(login->tgt_agt)) {
53565diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
53566index 7faa6ae..ae6c410 100644
53567--- a/drivers/target/target_core_device.c
53568+++ b/drivers/target/target_core_device.c
53569@@ -1495,7 +1495,7 @@ struct se_device *target_alloc_device(struct se_hba *hba, const char *name)
53570 spin_lock_init(&dev->se_tmr_lock);
53571 spin_lock_init(&dev->qf_cmd_lock);
53572 sema_init(&dev->caw_sem, 1);
53573- atomic_set(&dev->dev_ordered_id, 0);
53574+ atomic_set_unchecked(&dev->dev_ordered_id, 0);
53575 INIT_LIST_HEAD(&dev->t10_wwn.t10_vpd_list);
53576 spin_lock_init(&dev->t10_wwn.t10_vpd_lock);
53577 INIT_LIST_HEAD(&dev->t10_pr.registration_list);
53578diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
53579index f786de0..04b643e 100644
53580--- a/drivers/target/target_core_transport.c
53581+++ b/drivers/target/target_core_transport.c
53582@@ -1168,7 +1168,7 @@ transport_check_alloc_task_attr(struct se_cmd *cmd)
53583 * Used to determine when ORDERED commands should go from
53584 * Dormant to Active status.
53585 */
53586- cmd->se_ordered_id = atomic_inc_return(&dev->dev_ordered_id);
53587+ cmd->se_ordered_id = atomic_inc_return_unchecked(&dev->dev_ordered_id);
53588 pr_debug("Allocated se_ordered_id: %u for Task Attr: 0x%02x on %s\n",
53589 cmd->se_ordered_id, cmd->sam_task_attr,
53590 dev->transport->name);
53591diff --git a/drivers/thermal/int340x_thermal/int3400_thermal.c b/drivers/thermal/int340x_thermal/int3400_thermal.c
53592index 031018e..90981a1 100644
53593--- a/drivers/thermal/int340x_thermal/int3400_thermal.c
53594+++ b/drivers/thermal/int340x_thermal/int3400_thermal.c
53595@@ -272,8 +272,10 @@ static int int3400_thermal_probe(struct platform_device *pdev)
53596 platform_set_drvdata(pdev, priv);
53597
53598 if (priv->uuid_bitmap & 1 << INT3400_THERMAL_PASSIVE_1) {
53599- int3400_thermal_ops.get_mode = int3400_thermal_get_mode;
53600- int3400_thermal_ops.set_mode = int3400_thermal_set_mode;
53601+ pax_open_kernel();
53602+ *(void **)&int3400_thermal_ops.get_mode = int3400_thermal_get_mode;
53603+ *(void **)&int3400_thermal_ops.set_mode = int3400_thermal_set_mode;
53604+ pax_close_kernel();
53605 }
53606 priv->thermal = thermal_zone_device_register("INT3400 Thermal", 0, 0,
53607 priv, &int3400_thermal_ops,
53608diff --git a/drivers/thermal/of-thermal.c b/drivers/thermal/of-thermal.c
53609index 668fb1b..2737bbe 100644
53610--- a/drivers/thermal/of-thermal.c
53611+++ b/drivers/thermal/of-thermal.c
53612@@ -31,6 +31,7 @@
53613 #include <linux/export.h>
53614 #include <linux/string.h>
53615 #include <linux/thermal.h>
53616+#include <linux/mm.h>
53617
53618 #include "thermal_core.h"
53619
53620@@ -412,9 +413,11 @@ thermal_zone_of_add_sensor(struct device_node *zone,
53621 tz->ops = ops;
53622 tz->sensor_data = data;
53623
53624- tzd->ops->get_temp = of_thermal_get_temp;
53625- tzd->ops->get_trend = of_thermal_get_trend;
53626- tzd->ops->set_emul_temp = of_thermal_set_emul_temp;
53627+ pax_open_kernel();
53628+ *(void **)&tzd->ops->get_temp = of_thermal_get_temp;
53629+ *(void **)&tzd->ops->get_trend = of_thermal_get_trend;
53630+ *(void **)&tzd->ops->set_emul_temp = of_thermal_set_emul_temp;
53631+ pax_close_kernel();
53632 mutex_unlock(&tzd->lock);
53633
53634 return tzd;
53635@@ -544,9 +547,11 @@ void thermal_zone_of_sensor_unregister(struct device *dev,
53636 return;
53637
53638 mutex_lock(&tzd->lock);
53639- tzd->ops->get_temp = NULL;
53640- tzd->ops->get_trend = NULL;
53641- tzd->ops->set_emul_temp = NULL;
53642+ pax_open_kernel();
53643+ *(void **)&tzd->ops->get_temp = NULL;
53644+ *(void **)&tzd->ops->get_trend = NULL;
53645+ *(void **)&tzd->ops->set_emul_temp = NULL;
53646+ pax_close_kernel();
53647
53648 tz->ops = NULL;
53649 tz->sensor_data = NULL;
53650diff --git a/drivers/thermal/x86_pkg_temp_thermal.c b/drivers/thermal/x86_pkg_temp_thermal.c
53651index 9ea3d9d..53e8792 100644
53652--- a/drivers/thermal/x86_pkg_temp_thermal.c
53653+++ b/drivers/thermal/x86_pkg_temp_thermal.c
53654@@ -567,7 +567,7 @@ static int pkg_temp_thermal_cpu_callback(struct notifier_block *nfb,
53655 return NOTIFY_OK;
53656 }
53657
53658-static struct notifier_block pkg_temp_thermal_notifier __refdata = {
53659+static struct notifier_block pkg_temp_thermal_notifier __refconst = {
53660 .notifier_call = pkg_temp_thermal_cpu_callback,
53661 };
53662
53663diff --git a/drivers/tty/cyclades.c b/drivers/tty/cyclades.c
53664index fd66f57..48e6376 100644
53665--- a/drivers/tty/cyclades.c
53666+++ b/drivers/tty/cyclades.c
53667@@ -1570,10 +1570,10 @@ static int cy_open(struct tty_struct *tty, struct file *filp)
53668 printk(KERN_DEBUG "cyc:cy_open ttyC%d, count = %d\n", info->line,
53669 info->port.count);
53670 #endif
53671- info->port.count++;
53672+ atomic_inc(&info->port.count);
53673 #ifdef CY_DEBUG_COUNT
53674 printk(KERN_DEBUG "cyc:cy_open (%d): incrementing count to %d\n",
53675- current->pid, info->port.count);
53676+ current->pid, atomic_read(&info->port.count));
53677 #endif
53678
53679 /*
53680@@ -3974,7 +3974,7 @@ static int cyclades_proc_show(struct seq_file *m, void *v)
53681 for (j = 0; j < cy_card[i].nports; j++) {
53682 info = &cy_card[i].ports[j];
53683
53684- if (info->port.count) {
53685+ if (atomic_read(&info->port.count)) {
53686 /* XXX is the ldisc num worth this? */
53687 struct tty_struct *tty;
53688 struct tty_ldisc *ld;
53689diff --git a/drivers/tty/hvc/hvc_console.c b/drivers/tty/hvc/hvc_console.c
53690index 4fcec1d..5a036f7 100644
53691--- a/drivers/tty/hvc/hvc_console.c
53692+++ b/drivers/tty/hvc/hvc_console.c
53693@@ -342,7 +342,7 @@ static int hvc_open(struct tty_struct *tty, struct file * filp)
53694
53695 spin_lock_irqsave(&hp->port.lock, flags);
53696 /* Check and then increment for fast path open. */
53697- if (hp->port.count++ > 0) {
53698+ if (atomic_inc_return(&hp->port.count) > 1) {
53699 spin_unlock_irqrestore(&hp->port.lock, flags);
53700 hvc_kick();
53701 return 0;
53702@@ -397,7 +397,7 @@ static void hvc_close(struct tty_struct *tty, struct file * filp)
53703
53704 spin_lock_irqsave(&hp->port.lock, flags);
53705
53706- if (--hp->port.count == 0) {
53707+ if (atomic_dec_return(&hp->port.count) == 0) {
53708 spin_unlock_irqrestore(&hp->port.lock, flags);
53709 /* We are done with the tty pointer now. */
53710 tty_port_tty_set(&hp->port, NULL);
53711@@ -419,9 +419,9 @@ static void hvc_close(struct tty_struct *tty, struct file * filp)
53712 */
53713 tty_wait_until_sent_from_close(tty, HVC_CLOSE_WAIT);
53714 } else {
53715- if (hp->port.count < 0)
53716+ if (atomic_read(&hp->port.count) < 0)
53717 printk(KERN_ERR "hvc_close %X: oops, count is %d\n",
53718- hp->vtermno, hp->port.count);
53719+ hp->vtermno, atomic_read(&hp->port.count));
53720 spin_unlock_irqrestore(&hp->port.lock, flags);
53721 }
53722 }
53723@@ -451,12 +451,12 @@ static void hvc_hangup(struct tty_struct *tty)
53724 * open->hangup case this can be called after the final close so prevent
53725 * that from happening for now.
53726 */
53727- if (hp->port.count <= 0) {
53728+ if (atomic_read(&hp->port.count) <= 0) {
53729 spin_unlock_irqrestore(&hp->port.lock, flags);
53730 return;
53731 }
53732
53733- hp->port.count = 0;
53734+ atomic_set(&hp->port.count, 0);
53735 spin_unlock_irqrestore(&hp->port.lock, flags);
53736 tty_port_tty_set(&hp->port, NULL);
53737
53738@@ -504,7 +504,7 @@ static int hvc_write(struct tty_struct *tty, const unsigned char *buf, int count
53739 return -EPIPE;
53740
53741 /* FIXME what's this (unprotected) check for? */
53742- if (hp->port.count <= 0)
53743+ if (atomic_read(&hp->port.count) <= 0)
53744 return -EIO;
53745
53746 spin_lock_irqsave(&hp->lock, flags);
53747diff --git a/drivers/tty/hvc/hvcs.c b/drivers/tty/hvc/hvcs.c
53748index 81ff7e1..dfb7b71 100644
53749--- a/drivers/tty/hvc/hvcs.c
53750+++ b/drivers/tty/hvc/hvcs.c
53751@@ -83,6 +83,7 @@
53752 #include <asm/hvcserver.h>
53753 #include <asm/uaccess.h>
53754 #include <asm/vio.h>
53755+#include <asm/local.h>
53756
53757 /*
53758 * 1.3.0 -> 1.3.1 In hvcs_open memset(..,0x00,..) instead of memset(..,0x3F,00).
53759@@ -416,7 +417,7 @@ static ssize_t hvcs_vterm_state_store(struct device *dev, struct device_attribut
53760
53761 spin_lock_irqsave(&hvcsd->lock, flags);
53762
53763- if (hvcsd->port.count > 0) {
53764+ if (atomic_read(&hvcsd->port.count) > 0) {
53765 spin_unlock_irqrestore(&hvcsd->lock, flags);
53766 printk(KERN_INFO "HVCS: vterm state unchanged. "
53767 "The hvcs device node is still in use.\n");
53768@@ -1127,7 +1128,7 @@ static int hvcs_install(struct tty_driver *driver, struct tty_struct *tty)
53769 }
53770 }
53771
53772- hvcsd->port.count = 0;
53773+ atomic_set(&hvcsd->port.count, 0);
53774 hvcsd->port.tty = tty;
53775 tty->driver_data = hvcsd;
53776
53777@@ -1180,7 +1181,7 @@ static int hvcs_open(struct tty_struct *tty, struct file *filp)
53778 unsigned long flags;
53779
53780 spin_lock_irqsave(&hvcsd->lock, flags);
53781- hvcsd->port.count++;
53782+ atomic_inc(&hvcsd->port.count);
53783 hvcsd->todo_mask |= HVCS_SCHED_READ;
53784 spin_unlock_irqrestore(&hvcsd->lock, flags);
53785
53786@@ -1216,7 +1217,7 @@ static void hvcs_close(struct tty_struct *tty, struct file *filp)
53787 hvcsd = tty->driver_data;
53788
53789 spin_lock_irqsave(&hvcsd->lock, flags);
53790- if (--hvcsd->port.count == 0) {
53791+ if (atomic_dec_and_test(&hvcsd->port.count)) {
53792
53793 vio_disable_interrupts(hvcsd->vdev);
53794
53795@@ -1241,10 +1242,10 @@ static void hvcs_close(struct tty_struct *tty, struct file *filp)
53796
53797 free_irq(irq, hvcsd);
53798 return;
53799- } else if (hvcsd->port.count < 0) {
53800+ } else if (atomic_read(&hvcsd->port.count) < 0) {
53801 printk(KERN_ERR "HVCS: vty-server@%X open_count: %d"
53802 " is missmanaged.\n",
53803- hvcsd->vdev->unit_address, hvcsd->port.count);
53804+ hvcsd->vdev->unit_address, atomic_read(&hvcsd->port.count));
53805 }
53806
53807 spin_unlock_irqrestore(&hvcsd->lock, flags);
53808@@ -1266,7 +1267,7 @@ static void hvcs_hangup(struct tty_struct * tty)
53809
53810 spin_lock_irqsave(&hvcsd->lock, flags);
53811 /* Preserve this so that we know how many kref refs to put */
53812- temp_open_count = hvcsd->port.count;
53813+ temp_open_count = atomic_read(&hvcsd->port.count);
53814
53815 /*
53816 * Don't kref put inside the spinlock because the destruction
53817@@ -1281,7 +1282,7 @@ static void hvcs_hangup(struct tty_struct * tty)
53818 tty->driver_data = NULL;
53819 hvcsd->port.tty = NULL;
53820
53821- hvcsd->port.count = 0;
53822+ atomic_set(&hvcsd->port.count, 0);
53823
53824 /* This will drop any buffered data on the floor which is OK in a hangup
53825 * scenario. */
53826@@ -1352,7 +1353,7 @@ static int hvcs_write(struct tty_struct *tty,
53827 * the middle of a write operation? This is a crummy place to do this
53828 * but we want to keep it all in the spinlock.
53829 */
53830- if (hvcsd->port.count <= 0) {
53831+ if (atomic_read(&hvcsd->port.count) <= 0) {
53832 spin_unlock_irqrestore(&hvcsd->lock, flags);
53833 return -ENODEV;
53834 }
53835@@ -1426,7 +1427,7 @@ static int hvcs_write_room(struct tty_struct *tty)
53836 {
53837 struct hvcs_struct *hvcsd = tty->driver_data;
53838
53839- if (!hvcsd || hvcsd->port.count <= 0)
53840+ if (!hvcsd || atomic_read(&hvcsd->port.count) <= 0)
53841 return 0;
53842
53843 return HVCS_BUFF_LEN - hvcsd->chars_in_buffer;
53844diff --git a/drivers/tty/hvc/hvsi.c b/drivers/tty/hvc/hvsi.c
53845index 4190199..06d5bfa 100644
53846--- a/drivers/tty/hvc/hvsi.c
53847+++ b/drivers/tty/hvc/hvsi.c
53848@@ -85,7 +85,7 @@ struct hvsi_struct {
53849 int n_outbuf;
53850 uint32_t vtermno;
53851 uint32_t virq;
53852- atomic_t seqno; /* HVSI packet sequence number */
53853+ atomic_unchecked_t seqno; /* HVSI packet sequence number */
53854 uint16_t mctrl;
53855 uint8_t state; /* HVSI protocol state */
53856 uint8_t flags;
53857@@ -295,7 +295,7 @@ static int hvsi_version_respond(struct hvsi_struct *hp, uint16_t query_seqno)
53858
53859 packet.hdr.type = VS_QUERY_RESPONSE_PACKET_HEADER;
53860 packet.hdr.len = sizeof(struct hvsi_query_response);
53861- packet.hdr.seqno = atomic_inc_return(&hp->seqno);
53862+ packet.hdr.seqno = atomic_inc_return_unchecked(&hp->seqno);
53863 packet.verb = VSV_SEND_VERSION_NUMBER;
53864 packet.u.version = HVSI_VERSION;
53865 packet.query_seqno = query_seqno+1;
53866@@ -555,7 +555,7 @@ static int hvsi_query(struct hvsi_struct *hp, uint16_t verb)
53867
53868 packet.hdr.type = VS_QUERY_PACKET_HEADER;
53869 packet.hdr.len = sizeof(struct hvsi_query);
53870- packet.hdr.seqno = atomic_inc_return(&hp->seqno);
53871+ packet.hdr.seqno = atomic_inc_return_unchecked(&hp->seqno);
53872 packet.verb = verb;
53873
53874 pr_debug("%s: sending %i bytes\n", __func__, packet.hdr.len);
53875@@ -597,7 +597,7 @@ static int hvsi_set_mctrl(struct hvsi_struct *hp, uint16_t mctrl)
53876 int wrote;
53877
53878 packet.hdr.type = VS_CONTROL_PACKET_HEADER,
53879- packet.hdr.seqno = atomic_inc_return(&hp->seqno);
53880+ packet.hdr.seqno = atomic_inc_return_unchecked(&hp->seqno);
53881 packet.hdr.len = sizeof(struct hvsi_control);
53882 packet.verb = VSV_SET_MODEM_CTL;
53883 packet.mask = HVSI_TSDTR;
53884@@ -680,7 +680,7 @@ static int hvsi_put_chars(struct hvsi_struct *hp, const char *buf, int count)
53885 BUG_ON(count > HVSI_MAX_OUTGOING_DATA);
53886
53887 packet.hdr.type = VS_DATA_PACKET_HEADER;
53888- packet.hdr.seqno = atomic_inc_return(&hp->seqno);
53889+ packet.hdr.seqno = atomic_inc_return_unchecked(&hp->seqno);
53890 packet.hdr.len = count + sizeof(struct hvsi_header);
53891 memcpy(&packet.data, buf, count);
53892
53893@@ -697,7 +697,7 @@ static void hvsi_close_protocol(struct hvsi_struct *hp)
53894 struct hvsi_control packet __ALIGNED__;
53895
53896 packet.hdr.type = VS_CONTROL_PACKET_HEADER;
53897- packet.hdr.seqno = atomic_inc_return(&hp->seqno);
53898+ packet.hdr.seqno = atomic_inc_return_unchecked(&hp->seqno);
53899 packet.hdr.len = 6;
53900 packet.verb = VSV_CLOSE_PROTOCOL;
53901
53902@@ -725,7 +725,7 @@ static int hvsi_open(struct tty_struct *tty, struct file *filp)
53903
53904 tty_port_tty_set(&hp->port, tty);
53905 spin_lock_irqsave(&hp->lock, flags);
53906- hp->port.count++;
53907+ atomic_inc(&hp->port.count);
53908 atomic_set(&hp->seqno, 0);
53909 h_vio_signal(hp->vtermno, VIO_IRQ_ENABLE);
53910 spin_unlock_irqrestore(&hp->lock, flags);
53911@@ -782,7 +782,7 @@ static void hvsi_close(struct tty_struct *tty, struct file *filp)
53912
53913 spin_lock_irqsave(&hp->lock, flags);
53914
53915- if (--hp->port.count == 0) {
53916+ if (atomic_dec_return(&hp->port.count) == 0) {
53917 tty_port_tty_set(&hp->port, NULL);
53918 hp->inbuf_end = hp->inbuf; /* discard remaining partial packets */
53919
53920@@ -815,9 +815,9 @@ static void hvsi_close(struct tty_struct *tty, struct file *filp)
53921
53922 spin_lock_irqsave(&hp->lock, flags);
53923 }
53924- } else if (hp->port.count < 0)
53925+ } else if (atomic_read(&hp->port.count) < 0)
53926 printk(KERN_ERR "hvsi_close %lu: oops, count is %d\n",
53927- hp - hvsi_ports, hp->port.count);
53928+ hp - hvsi_ports, atomic_read(&hp->port.count));
53929
53930 spin_unlock_irqrestore(&hp->lock, flags);
53931 }
53932@@ -832,7 +832,7 @@ static void hvsi_hangup(struct tty_struct *tty)
53933 tty_port_tty_set(&hp->port, NULL);
53934
53935 spin_lock_irqsave(&hp->lock, flags);
53936- hp->port.count = 0;
53937+ atomic_set(&hp->port.count, 0);
53938 hp->n_outbuf = 0;
53939 spin_unlock_irqrestore(&hp->lock, flags);
53940 }
53941diff --git a/drivers/tty/hvc/hvsi_lib.c b/drivers/tty/hvc/hvsi_lib.c
53942index a270f04..7c77b5d 100644
53943--- a/drivers/tty/hvc/hvsi_lib.c
53944+++ b/drivers/tty/hvc/hvsi_lib.c
53945@@ -8,7 +8,7 @@
53946
53947 static int hvsi_send_packet(struct hvsi_priv *pv, struct hvsi_header *packet)
53948 {
53949- packet->seqno = cpu_to_be16(atomic_inc_return(&pv->seqno));
53950+ packet->seqno = cpu_to_be16(atomic_inc_return_unchecked(&pv->seqno));
53951
53952 /* Assumes that always succeeds, works in practice */
53953 return pv->put_chars(pv->termno, (char *)packet, packet->len);
53954@@ -20,7 +20,7 @@ static void hvsi_start_handshake(struct hvsi_priv *pv)
53955
53956 /* Reset state */
53957 pv->established = 0;
53958- atomic_set(&pv->seqno, 0);
53959+ atomic_set_unchecked(&pv->seqno, 0);
53960
53961 pr_devel("HVSI@%x: Handshaking started\n", pv->termno);
53962
53963diff --git a/drivers/tty/ipwireless/tty.c b/drivers/tty/ipwireless/tty.c
53964index 345cebb..d5a1e9e 100644
53965--- a/drivers/tty/ipwireless/tty.c
53966+++ b/drivers/tty/ipwireless/tty.c
53967@@ -28,6 +28,7 @@
53968 #include <linux/tty_driver.h>
53969 #include <linux/tty_flip.h>
53970 #include <linux/uaccess.h>
53971+#include <asm/local.h>
53972
53973 #include "tty.h"
53974 #include "network.h"
53975@@ -93,10 +94,10 @@ static int ipw_open(struct tty_struct *linux_tty, struct file *filp)
53976 return -ENODEV;
53977
53978 mutex_lock(&tty->ipw_tty_mutex);
53979- if (tty->port.count == 0)
53980+ if (atomic_read(&tty->port.count) == 0)
53981 tty->tx_bytes_queued = 0;
53982
53983- tty->port.count++;
53984+ atomic_inc(&tty->port.count);
53985
53986 tty->port.tty = linux_tty;
53987 linux_tty->driver_data = tty;
53988@@ -112,9 +113,7 @@ static int ipw_open(struct tty_struct *linux_tty, struct file *filp)
53989
53990 static void do_ipw_close(struct ipw_tty *tty)
53991 {
53992- tty->port.count--;
53993-
53994- if (tty->port.count == 0) {
53995+ if (atomic_dec_return(&tty->port.count) == 0) {
53996 struct tty_struct *linux_tty = tty->port.tty;
53997
53998 if (linux_tty != NULL) {
53999@@ -135,7 +134,7 @@ static void ipw_hangup(struct tty_struct *linux_tty)
54000 return;
54001
54002 mutex_lock(&tty->ipw_tty_mutex);
54003- if (tty->port.count == 0) {
54004+ if (atomic_read(&tty->port.count) == 0) {
54005 mutex_unlock(&tty->ipw_tty_mutex);
54006 return;
54007 }
54008@@ -158,7 +157,7 @@ void ipwireless_tty_received(struct ipw_tty *tty, unsigned char *data,
54009
54010 mutex_lock(&tty->ipw_tty_mutex);
54011
54012- if (!tty->port.count) {
54013+ if (!atomic_read(&tty->port.count)) {
54014 mutex_unlock(&tty->ipw_tty_mutex);
54015 return;
54016 }
54017@@ -197,7 +196,7 @@ static int ipw_write(struct tty_struct *linux_tty,
54018 return -ENODEV;
54019
54020 mutex_lock(&tty->ipw_tty_mutex);
54021- if (!tty->port.count) {
54022+ if (!atomic_read(&tty->port.count)) {
54023 mutex_unlock(&tty->ipw_tty_mutex);
54024 return -EINVAL;
54025 }
54026@@ -237,7 +236,7 @@ static int ipw_write_room(struct tty_struct *linux_tty)
54027 if (!tty)
54028 return -ENODEV;
54029
54030- if (!tty->port.count)
54031+ if (!atomic_read(&tty->port.count))
54032 return -EINVAL;
54033
54034 room = IPWIRELESS_TX_QUEUE_SIZE - tty->tx_bytes_queued;
54035@@ -279,7 +278,7 @@ static int ipw_chars_in_buffer(struct tty_struct *linux_tty)
54036 if (!tty)
54037 return 0;
54038
54039- if (!tty->port.count)
54040+ if (!atomic_read(&tty->port.count))
54041 return 0;
54042
54043 return tty->tx_bytes_queued;
54044@@ -360,7 +359,7 @@ static int ipw_tiocmget(struct tty_struct *linux_tty)
54045 if (!tty)
54046 return -ENODEV;
54047
54048- if (!tty->port.count)
54049+ if (!atomic_read(&tty->port.count))
54050 return -EINVAL;
54051
54052 return get_control_lines(tty);
54053@@ -376,7 +375,7 @@ ipw_tiocmset(struct tty_struct *linux_tty,
54054 if (!tty)
54055 return -ENODEV;
54056
54057- if (!tty->port.count)
54058+ if (!atomic_read(&tty->port.count))
54059 return -EINVAL;
54060
54061 return set_control_lines(tty, set, clear);
54062@@ -390,7 +389,7 @@ static int ipw_ioctl(struct tty_struct *linux_tty,
54063 if (!tty)
54064 return -ENODEV;
54065
54066- if (!tty->port.count)
54067+ if (!atomic_read(&tty->port.count))
54068 return -EINVAL;
54069
54070 /* FIXME: Exactly how is the tty object locked here .. */
54071@@ -546,7 +545,7 @@ void ipwireless_tty_free(struct ipw_tty *tty)
54072 * are gone */
54073 mutex_lock(&ttyj->ipw_tty_mutex);
54074 }
54075- while (ttyj->port.count)
54076+ while (atomic_read(&ttyj->port.count))
54077 do_ipw_close(ttyj);
54078 ipwireless_disassociate_network_ttys(network,
54079 ttyj->channel_idx);
54080diff --git a/drivers/tty/moxa.c b/drivers/tty/moxa.c
54081index 14c54e0..1efd4f2 100644
54082--- a/drivers/tty/moxa.c
54083+++ b/drivers/tty/moxa.c
54084@@ -1189,7 +1189,7 @@ static int moxa_open(struct tty_struct *tty, struct file *filp)
54085 }
54086
54087 ch = &brd->ports[port % MAX_PORTS_PER_BOARD];
54088- ch->port.count++;
54089+ atomic_inc(&ch->port.count);
54090 tty->driver_data = ch;
54091 tty_port_tty_set(&ch->port, tty);
54092 mutex_lock(&ch->port.mutex);
54093diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c
54094index c434376..114ce13 100644
54095--- a/drivers/tty/n_gsm.c
54096+++ b/drivers/tty/n_gsm.c
54097@@ -1644,7 +1644,7 @@ static struct gsm_dlci *gsm_dlci_alloc(struct gsm_mux *gsm, int addr)
54098 spin_lock_init(&dlci->lock);
54099 mutex_init(&dlci->mutex);
54100 dlci->fifo = &dlci->_fifo;
54101- if (kfifo_alloc(&dlci->_fifo, 4096, GFP_KERNEL) < 0) {
54102+ if (kfifo_alloc(&dlci->_fifo, 4096, GFP_KERNEL)) {
54103 kfree(dlci);
54104 return NULL;
54105 }
54106@@ -2958,7 +2958,7 @@ static int gsmtty_open(struct tty_struct *tty, struct file *filp)
54107 struct gsm_dlci *dlci = tty->driver_data;
54108 struct tty_port *port = &dlci->port;
54109
54110- port->count++;
54111+ atomic_inc(&port->count);
54112 tty_port_tty_set(port, tty);
54113
54114 dlci->modem_rx = 0;
54115diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c
54116index cf6e0f2..4283167 100644
54117--- a/drivers/tty/n_tty.c
54118+++ b/drivers/tty/n_tty.c
54119@@ -116,7 +116,7 @@ struct n_tty_data {
54120 int minimum_to_wake;
54121
54122 /* consumer-published */
54123- size_t read_tail;
54124+ size_t read_tail __intentional_overflow(-1);
54125 size_t line_start;
54126
54127 /* protected by output lock */
54128@@ -2547,6 +2547,7 @@ void n_tty_inherit_ops(struct tty_ldisc_ops *ops)
54129 {
54130 *ops = tty_ldisc_N_TTY;
54131 ops->owner = NULL;
54132- ops->refcount = ops->flags = 0;
54133+ atomic_set(&ops->refcount, 0);
54134+ ops->flags = 0;
54135 }
54136 EXPORT_SYMBOL_GPL(n_tty_inherit_ops);
54137diff --git a/drivers/tty/pty.c b/drivers/tty/pty.c
54138index e72ee62..d977ad9 100644
54139--- a/drivers/tty/pty.c
54140+++ b/drivers/tty/pty.c
54141@@ -848,8 +848,10 @@ static void __init unix98_pty_init(void)
54142 panic("Couldn't register Unix98 pts driver");
54143
54144 /* Now create the /dev/ptmx special device */
54145+ pax_open_kernel();
54146 tty_default_fops(&ptmx_fops);
54147- ptmx_fops.open = ptmx_open;
54148+ *(void **)&ptmx_fops.open = ptmx_open;
54149+ pax_close_kernel();
54150
54151 cdev_init(&ptmx_cdev, &ptmx_fops);
54152 if (cdev_add(&ptmx_cdev, MKDEV(TTYAUX_MAJOR, 2), 1) ||
54153diff --git a/drivers/tty/rocket.c b/drivers/tty/rocket.c
54154index c8dd8dc..dca6cfd 100644
54155--- a/drivers/tty/rocket.c
54156+++ b/drivers/tty/rocket.c
54157@@ -914,7 +914,7 @@ static int rp_open(struct tty_struct *tty, struct file *filp)
54158 tty->driver_data = info;
54159 tty_port_tty_set(port, tty);
54160
54161- if (port->count++ == 0) {
54162+ if (atomic_inc_return(&port->count) == 1) {
54163 atomic_inc(&rp_num_ports_open);
54164
54165 #ifdef ROCKET_DEBUG_OPEN
54166@@ -923,7 +923,7 @@ static int rp_open(struct tty_struct *tty, struct file *filp)
54167 #endif
54168 }
54169 #ifdef ROCKET_DEBUG_OPEN
54170- printk(KERN_INFO "rp_open ttyR%d, count=%d\n", info->line, info->port.count);
54171+ printk(KERN_INFO "rp_open ttyR%d, count=%d\n", info->line, atomic_read(&info->port.count));
54172 #endif
54173
54174 /*
54175@@ -1515,7 +1515,7 @@ static void rp_hangup(struct tty_struct *tty)
54176 spin_unlock_irqrestore(&info->port.lock, flags);
54177 return;
54178 }
54179- if (info->port.count)
54180+ if (atomic_read(&info->port.count))
54181 atomic_dec(&rp_num_ports_open);
54182 clear_bit((info->aiop * 8) + info->chan, (void *) &xmit_flags[info->board]);
54183 spin_unlock_irqrestore(&info->port.lock, flags);
54184diff --git a/drivers/tty/serial/ioc4_serial.c b/drivers/tty/serial/ioc4_serial.c
54185index aa28209..e08fb85 100644
54186--- a/drivers/tty/serial/ioc4_serial.c
54187+++ b/drivers/tty/serial/ioc4_serial.c
54188@@ -437,7 +437,7 @@ struct ioc4_soft {
54189 } is_intr_info[MAX_IOC4_INTR_ENTS];
54190
54191 /* Number of entries active in the above array */
54192- atomic_t is_num_intrs;
54193+ atomic_unchecked_t is_num_intrs;
54194 } is_intr_type[IOC4_NUM_INTR_TYPES];
54195
54196 /* is_ir_lock must be held while
54197@@ -974,7 +974,7 @@ intr_connect(struct ioc4_soft *soft, int type,
54198 BUG_ON(!((type == IOC4_SIO_INTR_TYPE)
54199 || (type == IOC4_OTHER_INTR_TYPE)));
54200
54201- i = atomic_inc_return(&soft-> is_intr_type[type].is_num_intrs) - 1;
54202+ i = atomic_inc_return_unchecked(&soft-> is_intr_type[type].is_num_intrs) - 1;
54203 BUG_ON(!(i < MAX_IOC4_INTR_ENTS || (printk("i %d\n", i), 0)));
54204
54205 /* Save off the lower level interrupt handler */
54206@@ -1001,7 +1001,7 @@ static irqreturn_t ioc4_intr(int irq, void *arg)
54207
54208 soft = arg;
54209 for (intr_type = 0; intr_type < IOC4_NUM_INTR_TYPES; intr_type++) {
54210- num_intrs = (int)atomic_read(
54211+ num_intrs = (int)atomic_read_unchecked(
54212 &soft->is_intr_type[intr_type].is_num_intrs);
54213
54214 this_mir = this_ir = pending_intrs(soft, intr_type);
54215diff --git a/drivers/tty/serial/kgdb_nmi.c b/drivers/tty/serial/kgdb_nmi.c
54216index 129dc5b..1da5bb8 100644
54217--- a/drivers/tty/serial/kgdb_nmi.c
54218+++ b/drivers/tty/serial/kgdb_nmi.c
54219@@ -53,7 +53,9 @@ static int kgdb_nmi_console_setup(struct console *co, char *options)
54220 * I/O utilities that messages sent to the console will automatically
54221 * be displayed on the dbg_io.
54222 */
54223- dbg_io_ops->is_console = true;
54224+ pax_open_kernel();
54225+ *(int *)&dbg_io_ops->is_console = true;
54226+ pax_close_kernel();
54227
54228 return 0;
54229 }
54230diff --git a/drivers/tty/serial/kgdboc.c b/drivers/tty/serial/kgdboc.c
54231index a260cde..6b2b5ce 100644
54232--- a/drivers/tty/serial/kgdboc.c
54233+++ b/drivers/tty/serial/kgdboc.c
54234@@ -24,8 +24,9 @@
54235 #define MAX_CONFIG_LEN 40
54236
54237 static struct kgdb_io kgdboc_io_ops;
54238+static struct kgdb_io kgdboc_io_ops_console;
54239
54240-/* -1 = init not run yet, 0 = unconfigured, 1 = configured. */
54241+/* -1 = init not run yet, 0 = unconfigured, 1/2 = configured. */
54242 static int configured = -1;
54243
54244 static char config[MAX_CONFIG_LEN];
54245@@ -151,6 +152,8 @@ static void cleanup_kgdboc(void)
54246 kgdboc_unregister_kbd();
54247 if (configured == 1)
54248 kgdb_unregister_io_module(&kgdboc_io_ops);
54249+ else if (configured == 2)
54250+ kgdb_unregister_io_module(&kgdboc_io_ops_console);
54251 }
54252
54253 static int configure_kgdboc(void)
54254@@ -160,13 +163,13 @@ static int configure_kgdboc(void)
54255 int err;
54256 char *cptr = config;
54257 struct console *cons;
54258+ int is_console = 0;
54259
54260 err = kgdboc_option_setup(config);
54261 if (err || !strlen(config) || isspace(config[0]))
54262 goto noconfig;
54263
54264 err = -ENODEV;
54265- kgdboc_io_ops.is_console = 0;
54266 kgdb_tty_driver = NULL;
54267
54268 kgdboc_use_kms = 0;
54269@@ -187,7 +190,7 @@ static int configure_kgdboc(void)
54270 int idx;
54271 if (cons->device && cons->device(cons, &idx) == p &&
54272 idx == tty_line) {
54273- kgdboc_io_ops.is_console = 1;
54274+ is_console = 1;
54275 break;
54276 }
54277 cons = cons->next;
54278@@ -197,7 +200,13 @@ static int configure_kgdboc(void)
54279 kgdb_tty_line = tty_line;
54280
54281 do_register:
54282- err = kgdb_register_io_module(&kgdboc_io_ops);
54283+ if (is_console) {
54284+ err = kgdb_register_io_module(&kgdboc_io_ops_console);
54285+ configured = 2;
54286+ } else {
54287+ err = kgdb_register_io_module(&kgdboc_io_ops);
54288+ configured = 1;
54289+ }
54290 if (err)
54291 goto noconfig;
54292
54293@@ -205,8 +214,6 @@ do_register:
54294 if (err)
54295 goto nmi_con_failed;
54296
54297- configured = 1;
54298-
54299 return 0;
54300
54301 nmi_con_failed:
54302@@ -223,7 +230,7 @@ noconfig:
54303 static int __init init_kgdboc(void)
54304 {
54305 /* Already configured? */
54306- if (configured == 1)
54307+ if (configured >= 1)
54308 return 0;
54309
54310 return configure_kgdboc();
54311@@ -272,7 +279,7 @@ static int param_set_kgdboc_var(const char *kmessage, struct kernel_param *kp)
54312 if (config[len - 1] == '\n')
54313 config[len - 1] = '\0';
54314
54315- if (configured == 1)
54316+ if (configured >= 1)
54317 cleanup_kgdboc();
54318
54319 /* Go and configure with the new params. */
54320@@ -312,6 +319,15 @@ static struct kgdb_io kgdboc_io_ops = {
54321 .post_exception = kgdboc_post_exp_handler,
54322 };
54323
54324+static struct kgdb_io kgdboc_io_ops_console = {
54325+ .name = "kgdboc",
54326+ .read_char = kgdboc_get_char,
54327+ .write_char = kgdboc_put_char,
54328+ .pre_exception = kgdboc_pre_exp_handler,
54329+ .post_exception = kgdboc_post_exp_handler,
54330+ .is_console = 1
54331+};
54332+
54333 #ifdef CONFIG_KGDB_SERIAL_CONSOLE
54334 /* This is only available if kgdboc is a built in for early debugging */
54335 static int __init kgdboc_early_init(char *opt)
54336diff --git a/drivers/tty/serial/msm_serial.c b/drivers/tty/serial/msm_serial.c
54337index b73889c..9f74f0a 100644
54338--- a/drivers/tty/serial/msm_serial.c
54339+++ b/drivers/tty/serial/msm_serial.c
54340@@ -1012,7 +1012,7 @@ static struct uart_driver msm_uart_driver = {
54341 .cons = MSM_CONSOLE,
54342 };
54343
54344-static atomic_t msm_uart_next_id = ATOMIC_INIT(0);
54345+static atomic_unchecked_t msm_uart_next_id = ATOMIC_INIT(0);
54346
54347 static const struct of_device_id msm_uartdm_table[] = {
54348 { .compatible = "qcom,msm-uartdm-v1.1", .data = (void *)UARTDM_1P1 },
54349@@ -1036,7 +1036,7 @@ static int msm_serial_probe(struct platform_device *pdev)
54350 line = pdev->id;
54351
54352 if (line < 0)
54353- line = atomic_inc_return(&msm_uart_next_id) - 1;
54354+ line = atomic_inc_return_unchecked(&msm_uart_next_id) - 1;
54355
54356 if (unlikely(line < 0 || line >= UART_NR))
54357 return -ENXIO;
54358diff --git a/drivers/tty/serial/samsung.c b/drivers/tty/serial/samsung.c
54359index cf08876..711e0bf 100644
54360--- a/drivers/tty/serial/samsung.c
54361+++ b/drivers/tty/serial/samsung.c
54362@@ -987,11 +987,16 @@ static void s3c24xx_serial_shutdown(struct uart_port *port)
54363 ourport->tx_in_progress = 0;
54364 }
54365
54366+static int s3c64xx_serial_startup(struct uart_port *port);
54367 static int s3c24xx_serial_startup(struct uart_port *port)
54368 {
54369 struct s3c24xx_uart_port *ourport = to_ourport(port);
54370 int ret;
54371
54372+ /* Startup sequence is different for s3c64xx and higher SoC's */
54373+ if (s3c24xx_serial_has_interrupt_mask(port))
54374+ return s3c64xx_serial_startup(port);
54375+
54376 dbg("s3c24xx_serial_startup: port=%p (%08llx,%p)\n",
54377 port, (unsigned long long)port->mapbase, port->membase);
54378
54379@@ -1697,10 +1702,6 @@ static int s3c24xx_serial_init_port(struct s3c24xx_uart_port *ourport,
54380 /* setup info for port */
54381 port->dev = &platdev->dev;
54382
54383- /* Startup sequence is different for s3c64xx and higher SoC's */
54384- if (s3c24xx_serial_has_interrupt_mask(port))
54385- s3c24xx_serial_ops.startup = s3c64xx_serial_startup;
54386-
54387 port->uartclk = 1;
54388
54389 if (cfg->uart_flags & UPF_CONS_FLOW) {
54390diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c
54391index 6a1055a..5ca9ad9 100644
54392--- a/drivers/tty/serial/serial_core.c
54393+++ b/drivers/tty/serial/serial_core.c
54394@@ -1377,7 +1377,7 @@ static void uart_close(struct tty_struct *tty, struct file *filp)
54395 state = drv->state + tty->index;
54396 port = &state->port;
54397 spin_lock_irq(&port->lock);
54398- --port->count;
54399+ atomic_dec(&port->count);
54400 spin_unlock_irq(&port->lock);
54401 return;
54402 }
54403@@ -1387,7 +1387,7 @@ static void uart_close(struct tty_struct *tty, struct file *filp)
54404
54405 pr_debug("uart_close(%d) called\n", uport ? uport->line : -1);
54406
54407- if (!port->count || tty_port_close_start(port, tty, filp) == 0)
54408+ if (!atomic_read(&port->count) || tty_port_close_start(port, tty, filp) == 0)
54409 return;
54410
54411 /*
54412@@ -1511,7 +1511,7 @@ static void uart_hangup(struct tty_struct *tty)
54413 uart_flush_buffer(tty);
54414 uart_shutdown(tty, state);
54415 spin_lock_irqsave(&port->lock, flags);
54416- port->count = 0;
54417+ atomic_set(&port->count, 0);
54418 clear_bit(ASYNCB_NORMAL_ACTIVE, &port->flags);
54419 spin_unlock_irqrestore(&port->lock, flags);
54420 tty_port_tty_set(port, NULL);
54421@@ -1598,7 +1598,7 @@ static int uart_open(struct tty_struct *tty, struct file *filp)
54422 pr_debug("uart_open(%d) called\n", line);
54423
54424 spin_lock_irq(&port->lock);
54425- ++port->count;
54426+ atomic_inc(&port->count);
54427 spin_unlock_irq(&port->lock);
54428
54429 /*
54430diff --git a/drivers/tty/synclink.c b/drivers/tty/synclink.c
54431index b799170..87dafd5 100644
54432--- a/drivers/tty/synclink.c
54433+++ b/drivers/tty/synclink.c
54434@@ -3090,7 +3090,7 @@ static void mgsl_close(struct tty_struct *tty, struct file * filp)
54435
54436 if (debug_level >= DEBUG_LEVEL_INFO)
54437 printk("%s(%d):mgsl_close(%s) entry, count=%d\n",
54438- __FILE__,__LINE__, info->device_name, info->port.count);
54439+ __FILE__,__LINE__, info->device_name, atomic_read(&info->port.count));
54440
54441 if (tty_port_close_start(&info->port, tty, filp) == 0)
54442 goto cleanup;
54443@@ -3108,7 +3108,7 @@ static void mgsl_close(struct tty_struct *tty, struct file * filp)
54444 cleanup:
54445 if (debug_level >= DEBUG_LEVEL_INFO)
54446 printk("%s(%d):mgsl_close(%s) exit, count=%d\n", __FILE__,__LINE__,
54447- tty->driver->name, info->port.count);
54448+ tty->driver->name, atomic_read(&info->port.count));
54449
54450 } /* end of mgsl_close() */
54451
54452@@ -3207,8 +3207,8 @@ static void mgsl_hangup(struct tty_struct *tty)
54453
54454 mgsl_flush_buffer(tty);
54455 shutdown(info);
54456-
54457- info->port.count = 0;
54458+
54459+ atomic_set(&info->port.count, 0);
54460 info->port.flags &= ~ASYNC_NORMAL_ACTIVE;
54461 info->port.tty = NULL;
54462
54463@@ -3296,10 +3296,10 @@ static int block_til_ready(struct tty_struct *tty, struct file * filp,
54464
54465 if (debug_level >= DEBUG_LEVEL_INFO)
54466 printk("%s(%d):block_til_ready before block on %s count=%d\n",
54467- __FILE__,__LINE__, tty->driver->name, port->count );
54468+ __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
54469
54470 spin_lock_irqsave(&info->irq_spinlock, flags);
54471- port->count--;
54472+ atomic_dec(&port->count);
54473 spin_unlock_irqrestore(&info->irq_spinlock, flags);
54474 port->blocked_open++;
54475
54476@@ -3327,7 +3327,7 @@ static int block_til_ready(struct tty_struct *tty, struct file * filp,
54477
54478 if (debug_level >= DEBUG_LEVEL_INFO)
54479 printk("%s(%d):block_til_ready blocking on %s count=%d\n",
54480- __FILE__,__LINE__, tty->driver->name, port->count );
54481+ __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
54482
54483 tty_unlock(tty);
54484 schedule();
54485@@ -3339,12 +3339,12 @@ static int block_til_ready(struct tty_struct *tty, struct file * filp,
54486
54487 /* FIXME: Racy on hangup during close wait */
54488 if (!tty_hung_up_p(filp))
54489- port->count++;
54490+ atomic_inc(&port->count);
54491 port->blocked_open--;
54492
54493 if (debug_level >= DEBUG_LEVEL_INFO)
54494 printk("%s(%d):block_til_ready after blocking on %s count=%d\n",
54495- __FILE__,__LINE__, tty->driver->name, port->count );
54496+ __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
54497
54498 if (!retval)
54499 port->flags |= ASYNC_NORMAL_ACTIVE;
54500@@ -3396,7 +3396,7 @@ static int mgsl_open(struct tty_struct *tty, struct file * filp)
54501
54502 if (debug_level >= DEBUG_LEVEL_INFO)
54503 printk("%s(%d):mgsl_open(%s), old ref count = %d\n",
54504- __FILE__,__LINE__,tty->driver->name, info->port.count);
54505+ __FILE__,__LINE__,tty->driver->name, atomic_read(&info->port.count));
54506
54507 /* If port is closing, signal caller to try again */
54508 if (info->port.flags & ASYNC_CLOSING){
54509@@ -3415,10 +3415,10 @@ static int mgsl_open(struct tty_struct *tty, struct file * filp)
54510 spin_unlock_irqrestore(&info->netlock, flags);
54511 goto cleanup;
54512 }
54513- info->port.count++;
54514+ atomic_inc(&info->port.count);
54515 spin_unlock_irqrestore(&info->netlock, flags);
54516
54517- if (info->port.count == 1) {
54518+ if (atomic_read(&info->port.count) == 1) {
54519 /* 1st open on this device, init hardware */
54520 retval = startup(info);
54521 if (retval < 0)
54522@@ -3442,8 +3442,8 @@ cleanup:
54523 if (retval) {
54524 if (tty->count == 1)
54525 info->port.tty = NULL; /* tty layer will release tty struct */
54526- if(info->port.count)
54527- info->port.count--;
54528+ if (atomic_read(&info->port.count))
54529+ atomic_dec(&info->port.count);
54530 }
54531
54532 return retval;
54533@@ -7661,7 +7661,7 @@ static int hdlcdev_attach(struct net_device *dev, unsigned short encoding,
54534 unsigned short new_crctype;
54535
54536 /* return error if TTY interface open */
54537- if (info->port.count)
54538+ if (atomic_read(&info->port.count))
54539 return -EBUSY;
54540
54541 switch (encoding)
54542@@ -7756,7 +7756,7 @@ static int hdlcdev_open(struct net_device *dev)
54543
54544 /* arbitrate between network and tty opens */
54545 spin_lock_irqsave(&info->netlock, flags);
54546- if (info->port.count != 0 || info->netcount != 0) {
54547+ if (atomic_read(&info->port.count) != 0 || info->netcount != 0) {
54548 printk(KERN_WARNING "%s: hdlc_open returning busy\n", dev->name);
54549 spin_unlock_irqrestore(&info->netlock, flags);
54550 return -EBUSY;
54551@@ -7842,7 +7842,7 @@ static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
54552 printk("%s:hdlcdev_ioctl(%s)\n",__FILE__,dev->name);
54553
54554 /* return error if TTY interface open */
54555- if (info->port.count)
54556+ if (atomic_read(&info->port.count))
54557 return -EBUSY;
54558
54559 if (cmd != SIOCWANDEV)
54560diff --git a/drivers/tty/synclink_gt.c b/drivers/tty/synclink_gt.c
54561index 0e8c39b..e0cb171 100644
54562--- a/drivers/tty/synclink_gt.c
54563+++ b/drivers/tty/synclink_gt.c
54564@@ -670,7 +670,7 @@ static int open(struct tty_struct *tty, struct file *filp)
54565 tty->driver_data = info;
54566 info->port.tty = tty;
54567
54568- DBGINFO(("%s open, old ref count = %d\n", info->device_name, info->port.count));
54569+ DBGINFO(("%s open, old ref count = %d\n", info->device_name, atomic_read(&info->port.count)));
54570
54571 /* If port is closing, signal caller to try again */
54572 if (info->port.flags & ASYNC_CLOSING){
54573@@ -691,10 +691,10 @@ static int open(struct tty_struct *tty, struct file *filp)
54574 mutex_unlock(&info->port.mutex);
54575 goto cleanup;
54576 }
54577- info->port.count++;
54578+ atomic_inc(&info->port.count);
54579 spin_unlock_irqrestore(&info->netlock, flags);
54580
54581- if (info->port.count == 1) {
54582+ if (atomic_read(&info->port.count) == 1) {
54583 /* 1st open on this device, init hardware */
54584 retval = startup(info);
54585 if (retval < 0) {
54586@@ -715,8 +715,8 @@ cleanup:
54587 if (retval) {
54588 if (tty->count == 1)
54589 info->port.tty = NULL; /* tty layer will release tty struct */
54590- if(info->port.count)
54591- info->port.count--;
54592+ if(atomic_read(&info->port.count))
54593+ atomic_dec(&info->port.count);
54594 }
54595
54596 DBGINFO(("%s open rc=%d\n", info->device_name, retval));
54597@@ -729,7 +729,7 @@ static void close(struct tty_struct *tty, struct file *filp)
54598
54599 if (sanity_check(info, tty->name, "close"))
54600 return;
54601- DBGINFO(("%s close entry, count=%d\n", info->device_name, info->port.count));
54602+ DBGINFO(("%s close entry, count=%d\n", info->device_name, atomic_read(&info->port.count)));
54603
54604 if (tty_port_close_start(&info->port, tty, filp) == 0)
54605 goto cleanup;
54606@@ -746,7 +746,7 @@ static void close(struct tty_struct *tty, struct file *filp)
54607 tty_port_close_end(&info->port, tty);
54608 info->port.tty = NULL;
54609 cleanup:
54610- DBGINFO(("%s close exit, count=%d\n", tty->driver->name, info->port.count));
54611+ DBGINFO(("%s close exit, count=%d\n", tty->driver->name, atomic_read(&info->port.count)));
54612 }
54613
54614 static void hangup(struct tty_struct *tty)
54615@@ -764,7 +764,7 @@ static void hangup(struct tty_struct *tty)
54616 shutdown(info);
54617
54618 spin_lock_irqsave(&info->port.lock, flags);
54619- info->port.count = 0;
54620+ atomic_set(&info->port.count, 0);
54621 info->port.flags &= ~ASYNC_NORMAL_ACTIVE;
54622 info->port.tty = NULL;
54623 spin_unlock_irqrestore(&info->port.lock, flags);
54624@@ -1449,7 +1449,7 @@ static int hdlcdev_attach(struct net_device *dev, unsigned short encoding,
54625 unsigned short new_crctype;
54626
54627 /* return error if TTY interface open */
54628- if (info->port.count)
54629+ if (atomic_read(&info->port.count))
54630 return -EBUSY;
54631
54632 DBGINFO(("%s hdlcdev_attach\n", info->device_name));
54633@@ -1544,7 +1544,7 @@ static int hdlcdev_open(struct net_device *dev)
54634
54635 /* arbitrate between network and tty opens */
54636 spin_lock_irqsave(&info->netlock, flags);
54637- if (info->port.count != 0 || info->netcount != 0) {
54638+ if (atomic_read(&info->port.count) != 0 || info->netcount != 0) {
54639 DBGINFO(("%s hdlc_open busy\n", dev->name));
54640 spin_unlock_irqrestore(&info->netlock, flags);
54641 return -EBUSY;
54642@@ -1629,7 +1629,7 @@ static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
54643 DBGINFO(("%s hdlcdev_ioctl\n", dev->name));
54644
54645 /* return error if TTY interface open */
54646- if (info->port.count)
54647+ if (atomic_read(&info->port.count))
54648 return -EBUSY;
54649
54650 if (cmd != SIOCWANDEV)
54651@@ -2413,7 +2413,7 @@ static irqreturn_t slgt_interrupt(int dummy, void *dev_id)
54652 if (port == NULL)
54653 continue;
54654 spin_lock(&port->lock);
54655- if ((port->port.count || port->netcount) &&
54656+ if ((atomic_read(&port->port.count) || port->netcount) &&
54657 port->pending_bh && !port->bh_running &&
54658 !port->bh_requested) {
54659 DBGISR(("%s bh queued\n", port->device_name));
54660@@ -3299,7 +3299,7 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
54661 add_wait_queue(&port->open_wait, &wait);
54662
54663 spin_lock_irqsave(&info->lock, flags);
54664- port->count--;
54665+ atomic_dec(&port->count);
54666 spin_unlock_irqrestore(&info->lock, flags);
54667 port->blocked_open++;
54668
54669@@ -3335,7 +3335,7 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
54670 remove_wait_queue(&port->open_wait, &wait);
54671
54672 if (!tty_hung_up_p(filp))
54673- port->count++;
54674+ atomic_inc(&port->count);
54675 port->blocked_open--;
54676
54677 if (!retval)
54678diff --git a/drivers/tty/synclinkmp.c b/drivers/tty/synclinkmp.c
54679index c3f9091..abe4601 100644
54680--- a/drivers/tty/synclinkmp.c
54681+++ b/drivers/tty/synclinkmp.c
54682@@ -750,7 +750,7 @@ static int open(struct tty_struct *tty, struct file *filp)
54683
54684 if (debug_level >= DEBUG_LEVEL_INFO)
54685 printk("%s(%d):%s open(), old ref count = %d\n",
54686- __FILE__,__LINE__,tty->driver->name, info->port.count);
54687+ __FILE__,__LINE__,tty->driver->name, atomic_read(&info->port.count));
54688
54689 /* If port is closing, signal caller to try again */
54690 if (info->port.flags & ASYNC_CLOSING){
54691@@ -769,10 +769,10 @@ static int open(struct tty_struct *tty, struct file *filp)
54692 spin_unlock_irqrestore(&info->netlock, flags);
54693 goto cleanup;
54694 }
54695- info->port.count++;
54696+ atomic_inc(&info->port.count);
54697 spin_unlock_irqrestore(&info->netlock, flags);
54698
54699- if (info->port.count == 1) {
54700+ if (atomic_read(&info->port.count) == 1) {
54701 /* 1st open on this device, init hardware */
54702 retval = startup(info);
54703 if (retval < 0)
54704@@ -796,8 +796,8 @@ cleanup:
54705 if (retval) {
54706 if (tty->count == 1)
54707 info->port.tty = NULL; /* tty layer will release tty struct */
54708- if(info->port.count)
54709- info->port.count--;
54710+ if(atomic_read(&info->port.count))
54711+ atomic_dec(&info->port.count);
54712 }
54713
54714 return retval;
54715@@ -815,7 +815,7 @@ static void close(struct tty_struct *tty, struct file *filp)
54716
54717 if (debug_level >= DEBUG_LEVEL_INFO)
54718 printk("%s(%d):%s close() entry, count=%d\n",
54719- __FILE__,__LINE__, info->device_name, info->port.count);
54720+ __FILE__,__LINE__, info->device_name, atomic_read(&info->port.count));
54721
54722 if (tty_port_close_start(&info->port, tty, filp) == 0)
54723 goto cleanup;
54724@@ -834,7 +834,7 @@ static void close(struct tty_struct *tty, struct file *filp)
54725 cleanup:
54726 if (debug_level >= DEBUG_LEVEL_INFO)
54727 printk("%s(%d):%s close() exit, count=%d\n", __FILE__,__LINE__,
54728- tty->driver->name, info->port.count);
54729+ tty->driver->name, atomic_read(&info->port.count));
54730 }
54731
54732 /* Called by tty_hangup() when a hangup is signaled.
54733@@ -857,7 +857,7 @@ static void hangup(struct tty_struct *tty)
54734 shutdown(info);
54735
54736 spin_lock_irqsave(&info->port.lock, flags);
54737- info->port.count = 0;
54738+ atomic_set(&info->port.count, 0);
54739 info->port.flags &= ~ASYNC_NORMAL_ACTIVE;
54740 info->port.tty = NULL;
54741 spin_unlock_irqrestore(&info->port.lock, flags);
54742@@ -1565,7 +1565,7 @@ static int hdlcdev_attach(struct net_device *dev, unsigned short encoding,
54743 unsigned short new_crctype;
54744
54745 /* return error if TTY interface open */
54746- if (info->port.count)
54747+ if (atomic_read(&info->port.count))
54748 return -EBUSY;
54749
54750 switch (encoding)
54751@@ -1660,7 +1660,7 @@ static int hdlcdev_open(struct net_device *dev)
54752
54753 /* arbitrate between network and tty opens */
54754 spin_lock_irqsave(&info->netlock, flags);
54755- if (info->port.count != 0 || info->netcount != 0) {
54756+ if (atomic_read(&info->port.count) != 0 || info->netcount != 0) {
54757 printk(KERN_WARNING "%s: hdlc_open returning busy\n", dev->name);
54758 spin_unlock_irqrestore(&info->netlock, flags);
54759 return -EBUSY;
54760@@ -1746,7 +1746,7 @@ static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
54761 printk("%s:hdlcdev_ioctl(%s)\n",__FILE__,dev->name);
54762
54763 /* return error if TTY interface open */
54764- if (info->port.count)
54765+ if (atomic_read(&info->port.count))
54766 return -EBUSY;
54767
54768 if (cmd != SIOCWANDEV)
54769@@ -2621,7 +2621,7 @@ static irqreturn_t synclinkmp_interrupt(int dummy, void *dev_id)
54770 * do not request bottom half processing if the
54771 * device is not open in a normal mode.
54772 */
54773- if ( port && (port->port.count || port->netcount) &&
54774+ if ( port && (atomic_read(&port->port.count) || port->netcount) &&
54775 port->pending_bh && !port->bh_running &&
54776 !port->bh_requested ) {
54777 if ( debug_level >= DEBUG_LEVEL_ISR )
54778@@ -3318,10 +3318,10 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
54779
54780 if (debug_level >= DEBUG_LEVEL_INFO)
54781 printk("%s(%d):%s block_til_ready() before block, count=%d\n",
54782- __FILE__,__LINE__, tty->driver->name, port->count );
54783+ __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
54784
54785 spin_lock_irqsave(&info->lock, flags);
54786- port->count--;
54787+ atomic_dec(&port->count);
54788 spin_unlock_irqrestore(&info->lock, flags);
54789 port->blocked_open++;
54790
54791@@ -3349,7 +3349,7 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
54792
54793 if (debug_level >= DEBUG_LEVEL_INFO)
54794 printk("%s(%d):%s block_til_ready() count=%d\n",
54795- __FILE__,__LINE__, tty->driver->name, port->count );
54796+ __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
54797
54798 tty_unlock(tty);
54799 schedule();
54800@@ -3359,12 +3359,12 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
54801 set_current_state(TASK_RUNNING);
54802 remove_wait_queue(&port->open_wait, &wait);
54803 if (!tty_hung_up_p(filp))
54804- port->count++;
54805+ atomic_inc(&port->count);
54806 port->blocked_open--;
54807
54808 if (debug_level >= DEBUG_LEVEL_INFO)
54809 printk("%s(%d):%s block_til_ready() after, count=%d\n",
54810- __FILE__,__LINE__, tty->driver->name, port->count );
54811+ __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
54812
54813 if (!retval)
54814 port->flags |= ASYNC_NORMAL_ACTIVE;
54815diff --git a/drivers/tty/sysrq.c b/drivers/tty/sysrq.c
54816index 259a4d5..9b0c9e7 100644
54817--- a/drivers/tty/sysrq.c
54818+++ b/drivers/tty/sysrq.c
54819@@ -1085,7 +1085,7 @@ EXPORT_SYMBOL(unregister_sysrq_key);
54820 static ssize_t write_sysrq_trigger(struct file *file, const char __user *buf,
54821 size_t count, loff_t *ppos)
54822 {
54823- if (count) {
54824+ if (count && capable(CAP_SYS_ADMIN)) {
54825 char c;
54826
54827 if (get_user(c, buf))
54828diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
54829index 2bb4dfc..a7f6e86 100644
54830--- a/drivers/tty/tty_io.c
54831+++ b/drivers/tty/tty_io.c
54832@@ -3503,7 +3503,7 @@ EXPORT_SYMBOL(tty_devnum);
54833
54834 void tty_default_fops(struct file_operations *fops)
54835 {
54836- *fops = tty_fops;
54837+ memcpy((void *)fops, &tty_fops, sizeof(tty_fops));
54838 }
54839
54840 /*
54841diff --git a/drivers/tty/tty_ldisc.c b/drivers/tty/tty_ldisc.c
54842index 3737f55..7cef448 100644
54843--- a/drivers/tty/tty_ldisc.c
54844+++ b/drivers/tty/tty_ldisc.c
54845@@ -71,7 +71,7 @@ int tty_register_ldisc(int disc, struct tty_ldisc_ops *new_ldisc)
54846 raw_spin_lock_irqsave(&tty_ldiscs_lock, flags);
54847 tty_ldiscs[disc] = new_ldisc;
54848 new_ldisc->num = disc;
54849- new_ldisc->refcount = 0;
54850+ atomic_set(&new_ldisc->refcount, 0);
54851 raw_spin_unlock_irqrestore(&tty_ldiscs_lock, flags);
54852
54853 return ret;
54854@@ -99,7 +99,7 @@ int tty_unregister_ldisc(int disc)
54855 return -EINVAL;
54856
54857 raw_spin_lock_irqsave(&tty_ldiscs_lock, flags);
54858- if (tty_ldiscs[disc]->refcount)
54859+ if (atomic_read(&tty_ldiscs[disc]->refcount))
54860 ret = -EBUSY;
54861 else
54862 tty_ldiscs[disc] = NULL;
54863@@ -120,7 +120,7 @@ static struct tty_ldisc_ops *get_ldops(int disc)
54864 if (ldops) {
54865 ret = ERR_PTR(-EAGAIN);
54866 if (try_module_get(ldops->owner)) {
54867- ldops->refcount++;
54868+ atomic_inc(&ldops->refcount);
54869 ret = ldops;
54870 }
54871 }
54872@@ -133,7 +133,7 @@ static void put_ldops(struct tty_ldisc_ops *ldops)
54873 unsigned long flags;
54874
54875 raw_spin_lock_irqsave(&tty_ldiscs_lock, flags);
54876- ldops->refcount--;
54877+ atomic_dec(&ldops->refcount);
54878 module_put(ldops->owner);
54879 raw_spin_unlock_irqrestore(&tty_ldiscs_lock, flags);
54880 }
54881diff --git a/drivers/tty/tty_port.c b/drivers/tty/tty_port.c
54882index 40b31835..94d92ae 100644
54883--- a/drivers/tty/tty_port.c
54884+++ b/drivers/tty/tty_port.c
54885@@ -236,7 +236,7 @@ void tty_port_hangup(struct tty_port *port)
54886 unsigned long flags;
54887
54888 spin_lock_irqsave(&port->lock, flags);
54889- port->count = 0;
54890+ atomic_set(&port->count, 0);
54891 port->flags &= ~ASYNC_NORMAL_ACTIVE;
54892 tty = port->tty;
54893 if (tty)
54894@@ -398,7 +398,7 @@ int tty_port_block_til_ready(struct tty_port *port,
54895
54896 /* The port lock protects the port counts */
54897 spin_lock_irqsave(&port->lock, flags);
54898- port->count--;
54899+ atomic_dec(&port->count);
54900 port->blocked_open++;
54901 spin_unlock_irqrestore(&port->lock, flags);
54902
54903@@ -440,7 +440,7 @@ int tty_port_block_til_ready(struct tty_port *port,
54904 we must not mess that up further */
54905 spin_lock_irqsave(&port->lock, flags);
54906 if (!tty_hung_up_p(filp))
54907- port->count++;
54908+ atomic_inc(&port->count);
54909 port->blocked_open--;
54910 if (retval == 0)
54911 port->flags |= ASYNC_NORMAL_ACTIVE;
54912@@ -476,19 +476,19 @@ int tty_port_close_start(struct tty_port *port,
54913 return 0;
54914
54915 spin_lock_irqsave(&port->lock, flags);
54916- if (tty->count == 1 && port->count != 1) {
54917+ if (tty->count == 1 && atomic_read(&port->count) != 1) {
54918 printk(KERN_WARNING
54919 "tty_port_close_start: tty->count = 1 port count = %d.\n",
54920- port->count);
54921- port->count = 1;
54922+ atomic_read(&port->count));
54923+ atomic_set(&port->count, 1);
54924 }
54925- if (--port->count < 0) {
54926+ if (atomic_dec_return(&port->count) < 0) {
54927 printk(KERN_WARNING "tty_port_close_start: count = %d\n",
54928- port->count);
54929- port->count = 0;
54930+ atomic_read(&port->count));
54931+ atomic_set(&port->count, 0);
54932 }
54933
54934- if (port->count) {
54935+ if (atomic_read(&port->count)) {
54936 spin_unlock_irqrestore(&port->lock, flags);
54937 return 0;
54938 }
54939@@ -590,7 +590,7 @@ int tty_port_open(struct tty_port *port, struct tty_struct *tty,
54940 struct file *filp)
54941 {
54942 spin_lock_irq(&port->lock);
54943- ++port->count;
54944+ atomic_inc(&port->count);
54945 spin_unlock_irq(&port->lock);
54946 tty_port_tty_set(port, tty);
54947
54948diff --git a/drivers/tty/vt/keyboard.c b/drivers/tty/vt/keyboard.c
54949index 8a89f6e..50b32af 100644
54950--- a/drivers/tty/vt/keyboard.c
54951+++ b/drivers/tty/vt/keyboard.c
54952@@ -641,6 +641,16 @@ static void k_spec(struct vc_data *vc, unsigned char value, char up_flag)
54953 kbd->kbdmode == VC_OFF) &&
54954 value != KVAL(K_SAK))
54955 return; /* SAK is allowed even in raw mode */
54956+
54957+#if defined(CONFIG_GRKERNSEC_PROC) || defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
54958+ {
54959+ void *func = fn_handler[value];
54960+ if (func == fn_show_state || func == fn_show_ptregs ||
54961+ func == fn_show_mem)
54962+ return;
54963+ }
54964+#endif
54965+
54966 fn_handler[value](vc);
54967 }
54968
54969@@ -1776,9 +1786,6 @@ int vt_do_kdsk_ioctl(int cmd, struct kbentry __user *user_kbe, int perm,
54970 if (copy_from_user(&tmp, user_kbe, sizeof(struct kbentry)))
54971 return -EFAULT;
54972
54973- if (!capable(CAP_SYS_TTY_CONFIG))
54974- perm = 0;
54975-
54976 switch (cmd) {
54977 case KDGKBENT:
54978 /* Ensure another thread doesn't free it under us */
54979@@ -1793,6 +1800,9 @@ int vt_do_kdsk_ioctl(int cmd, struct kbentry __user *user_kbe, int perm,
54980 spin_unlock_irqrestore(&kbd_event_lock, flags);
54981 return put_user(val, &user_kbe->kb_value);
54982 case KDSKBENT:
54983+ if (!capable(CAP_SYS_TTY_CONFIG))
54984+ perm = 0;
54985+
54986 if (!perm)
54987 return -EPERM;
54988 if (!i && v == K_NOSUCHMAP) {
54989@@ -1883,9 +1893,6 @@ int vt_do_kdgkb_ioctl(int cmd, struct kbsentry __user *user_kdgkb, int perm)
54990 int i, j, k;
54991 int ret;
54992
54993- if (!capable(CAP_SYS_TTY_CONFIG))
54994- perm = 0;
54995-
54996 kbs = kmalloc(sizeof(*kbs), GFP_KERNEL);
54997 if (!kbs) {
54998 ret = -ENOMEM;
54999@@ -1919,6 +1926,9 @@ int vt_do_kdgkb_ioctl(int cmd, struct kbsentry __user *user_kdgkb, int perm)
55000 kfree(kbs);
55001 return ((p && *p) ? -EOVERFLOW : 0);
55002 case KDSKBSENT:
55003+ if (!capable(CAP_SYS_TTY_CONFIG))
55004+ perm = 0;
55005+
55006 if (!perm) {
55007 ret = -EPERM;
55008 goto reterr;
55009diff --git a/drivers/uio/uio.c b/drivers/uio/uio.c
55010index 6276f13..84f2449 100644
55011--- a/drivers/uio/uio.c
55012+++ b/drivers/uio/uio.c
55013@@ -25,6 +25,7 @@
55014 #include <linux/kobject.h>
55015 #include <linux/cdev.h>
55016 #include <linux/uio_driver.h>
55017+#include <asm/local.h>
55018
55019 #define UIO_MAX_DEVICES (1U << MINORBITS)
55020
55021@@ -231,7 +232,7 @@ static ssize_t event_show(struct device *dev,
55022 struct device_attribute *attr, char *buf)
55023 {
55024 struct uio_device *idev = dev_get_drvdata(dev);
55025- return sprintf(buf, "%u\n", (unsigned int)atomic_read(&idev->event));
55026+ return sprintf(buf, "%u\n", (unsigned int)atomic_read_unchecked(&idev->event));
55027 }
55028 static DEVICE_ATTR_RO(event);
55029
55030@@ -393,7 +394,7 @@ void uio_event_notify(struct uio_info *info)
55031 {
55032 struct uio_device *idev = info->uio_dev;
55033
55034- atomic_inc(&idev->event);
55035+ atomic_inc_unchecked(&idev->event);
55036 wake_up_interruptible(&idev->wait);
55037 kill_fasync(&idev->async_queue, SIGIO, POLL_IN);
55038 }
55039@@ -446,7 +447,7 @@ static int uio_open(struct inode *inode, struct file *filep)
55040 }
55041
55042 listener->dev = idev;
55043- listener->event_count = atomic_read(&idev->event);
55044+ listener->event_count = atomic_read_unchecked(&idev->event);
55045 filep->private_data = listener;
55046
55047 if (idev->info->open) {
55048@@ -497,7 +498,7 @@ static unsigned int uio_poll(struct file *filep, poll_table *wait)
55049 return -EIO;
55050
55051 poll_wait(filep, &idev->wait, wait);
55052- if (listener->event_count != atomic_read(&idev->event))
55053+ if (listener->event_count != atomic_read_unchecked(&idev->event))
55054 return POLLIN | POLLRDNORM;
55055 return 0;
55056 }
55057@@ -522,7 +523,7 @@ static ssize_t uio_read(struct file *filep, char __user *buf,
55058 do {
55059 set_current_state(TASK_INTERRUPTIBLE);
55060
55061- event_count = atomic_read(&idev->event);
55062+ event_count = atomic_read_unchecked(&idev->event);
55063 if (event_count != listener->event_count) {
55064 if (copy_to_user(buf, &event_count, count))
55065 retval = -EFAULT;
55066@@ -579,9 +580,13 @@ static ssize_t uio_write(struct file *filep, const char __user *buf,
55067 static int uio_find_mem_index(struct vm_area_struct *vma)
55068 {
55069 struct uio_device *idev = vma->vm_private_data;
55070+ unsigned long size;
55071
55072 if (vma->vm_pgoff < MAX_UIO_MAPS) {
55073- if (idev->info->mem[vma->vm_pgoff].size == 0)
55074+ size = idev->info->mem[vma->vm_pgoff].size;
55075+ if (size == 0)
55076+ return -1;
55077+ if (vma->vm_end - vma->vm_start > size)
55078 return -1;
55079 return (int)vma->vm_pgoff;
55080 }
55081@@ -813,7 +818,7 @@ int __uio_register_device(struct module *owner,
55082 idev->owner = owner;
55083 idev->info = info;
55084 init_waitqueue_head(&idev->wait);
55085- atomic_set(&idev->event, 0);
55086+ atomic_set_unchecked(&idev->event, 0);
55087
55088 ret = uio_get_minor(idev);
55089 if (ret)
55090diff --git a/drivers/usb/atm/cxacru.c b/drivers/usb/atm/cxacru.c
55091index 813d4d3..a71934f 100644
55092--- a/drivers/usb/atm/cxacru.c
55093+++ b/drivers/usb/atm/cxacru.c
55094@@ -472,7 +472,7 @@ static ssize_t cxacru_sysfs_store_adsl_config(struct device *dev,
55095 ret = sscanf(buf + pos, "%x=%x%n", &index, &value, &tmp);
55096 if (ret < 2)
55097 return -EINVAL;
55098- if (index < 0 || index > 0x7f)
55099+ if (index > 0x7f)
55100 return -EINVAL;
55101 pos += tmp;
55102
55103diff --git a/drivers/usb/atm/usbatm.c b/drivers/usb/atm/usbatm.c
55104index dada014..1d0d517 100644
55105--- a/drivers/usb/atm/usbatm.c
55106+++ b/drivers/usb/atm/usbatm.c
55107@@ -331,7 +331,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
55108 if (printk_ratelimit())
55109 atm_warn(instance, "%s: OAM not supported (vpi %d, vci %d)!\n",
55110 __func__, vpi, vci);
55111- atomic_inc(&vcc->stats->rx_err);
55112+ atomic_inc_unchecked(&vcc->stats->rx_err);
55113 return;
55114 }
55115
55116@@ -358,7 +358,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
55117 if (length > ATM_MAX_AAL5_PDU) {
55118 atm_rldbg(instance, "%s: bogus length %u (vcc: 0x%p)!\n",
55119 __func__, length, vcc);
55120- atomic_inc(&vcc->stats->rx_err);
55121+ atomic_inc_unchecked(&vcc->stats->rx_err);
55122 goto out;
55123 }
55124
55125@@ -367,14 +367,14 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
55126 if (sarb->len < pdu_length) {
55127 atm_rldbg(instance, "%s: bogus pdu_length %u (sarb->len: %u, vcc: 0x%p)!\n",
55128 __func__, pdu_length, sarb->len, vcc);
55129- atomic_inc(&vcc->stats->rx_err);
55130+ atomic_inc_unchecked(&vcc->stats->rx_err);
55131 goto out;
55132 }
55133
55134 if (crc32_be(~0, skb_tail_pointer(sarb) - pdu_length, pdu_length) != 0xc704dd7b) {
55135 atm_rldbg(instance, "%s: packet failed crc check (vcc: 0x%p)!\n",
55136 __func__, vcc);
55137- atomic_inc(&vcc->stats->rx_err);
55138+ atomic_inc_unchecked(&vcc->stats->rx_err);
55139 goto out;
55140 }
55141
55142@@ -386,7 +386,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
55143 if (printk_ratelimit())
55144 atm_err(instance, "%s: no memory for skb (length: %u)!\n",
55145 __func__, length);
55146- atomic_inc(&vcc->stats->rx_drop);
55147+ atomic_inc_unchecked(&vcc->stats->rx_drop);
55148 goto out;
55149 }
55150
55151@@ -414,7 +414,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
55152
55153 vcc->push(vcc, skb);
55154
55155- atomic_inc(&vcc->stats->rx);
55156+ atomic_inc_unchecked(&vcc->stats->rx);
55157 out:
55158 skb_trim(sarb, 0);
55159 }
55160@@ -612,7 +612,7 @@ static void usbatm_tx_process(unsigned long data)
55161 struct atm_vcc *vcc = UDSL_SKB(skb)->atm.vcc;
55162
55163 usbatm_pop(vcc, skb);
55164- atomic_inc(&vcc->stats->tx);
55165+ atomic_inc_unchecked(&vcc->stats->tx);
55166
55167 skb = skb_dequeue(&instance->sndqueue);
55168 }
55169@@ -756,11 +756,11 @@ static int usbatm_atm_proc_read(struct atm_dev *atm_dev, loff_t *pos, char *page
55170 if (!left--)
55171 return sprintf(page,
55172 "AAL5: tx %d ( %d err ), rx %d ( %d err, %d drop )\n",
55173- atomic_read(&atm_dev->stats.aal5.tx),
55174- atomic_read(&atm_dev->stats.aal5.tx_err),
55175- atomic_read(&atm_dev->stats.aal5.rx),
55176- atomic_read(&atm_dev->stats.aal5.rx_err),
55177- atomic_read(&atm_dev->stats.aal5.rx_drop));
55178+ atomic_read_unchecked(&atm_dev->stats.aal5.tx),
55179+ atomic_read_unchecked(&atm_dev->stats.aal5.tx_err),
55180+ atomic_read_unchecked(&atm_dev->stats.aal5.rx),
55181+ atomic_read_unchecked(&atm_dev->stats.aal5.rx_err),
55182+ atomic_read_unchecked(&atm_dev->stats.aal5.rx_drop));
55183
55184 if (!left--) {
55185 if (instance->disconnected)
55186diff --git a/drivers/usb/core/devices.c b/drivers/usb/core/devices.c
55187index 2a3bbdf..91d72cf 100644
55188--- a/drivers/usb/core/devices.c
55189+++ b/drivers/usb/core/devices.c
55190@@ -126,7 +126,7 @@ static const char format_endpt[] =
55191 * time it gets called.
55192 */
55193 static struct device_connect_event {
55194- atomic_t count;
55195+ atomic_unchecked_t count;
55196 wait_queue_head_t wait;
55197 } device_event = {
55198 .count = ATOMIC_INIT(1),
55199@@ -164,7 +164,7 @@ static const struct class_info clas_info[] = {
55200
55201 void usbfs_conn_disc_event(void)
55202 {
55203- atomic_add(2, &device_event.count);
55204+ atomic_add_unchecked(2, &device_event.count);
55205 wake_up(&device_event.wait);
55206 }
55207
55208@@ -652,7 +652,7 @@ static unsigned int usb_device_poll(struct file *file,
55209
55210 poll_wait(file, &device_event.wait, wait);
55211
55212- event_count = atomic_read(&device_event.count);
55213+ event_count = atomic_read_unchecked(&device_event.count);
55214 if (file->f_version != event_count) {
55215 file->f_version = event_count;
55216 return POLLIN | POLLRDNORM;
55217diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c
55218index 1163553..f292679 100644
55219--- a/drivers/usb/core/devio.c
55220+++ b/drivers/usb/core/devio.c
55221@@ -187,7 +187,7 @@ static ssize_t usbdev_read(struct file *file, char __user *buf, size_t nbytes,
55222 struct usb_dev_state *ps = file->private_data;
55223 struct usb_device *dev = ps->dev;
55224 ssize_t ret = 0;
55225- unsigned len;
55226+ size_t len;
55227 loff_t pos;
55228 int i;
55229
55230@@ -229,22 +229,22 @@ static ssize_t usbdev_read(struct file *file, char __user *buf, size_t nbytes,
55231 for (i = 0; nbytes && i < dev->descriptor.bNumConfigurations; i++) {
55232 struct usb_config_descriptor *config =
55233 (struct usb_config_descriptor *)dev->rawdescriptors[i];
55234- unsigned int length = le16_to_cpu(config->wTotalLength);
55235+ size_t length = le16_to_cpu(config->wTotalLength);
55236
55237 if (*ppos < pos + length) {
55238
55239 /* The descriptor may claim to be longer than it
55240 * really is. Here is the actual allocated length. */
55241- unsigned alloclen =
55242+ size_t alloclen =
55243 le16_to_cpu(dev->config[i].desc.wTotalLength);
55244
55245- len = length - (*ppos - pos);
55246+ len = length + pos - *ppos;
55247 if (len > nbytes)
55248 len = nbytes;
55249
55250 /* Simply don't write (skip over) unallocated parts */
55251 if (alloclen > (*ppos - pos)) {
55252- alloclen -= (*ppos - pos);
55253+ alloclen = alloclen + pos - *ppos;
55254 if (copy_to_user(buf,
55255 dev->rawdescriptors[i] + (*ppos - pos),
55256 min(len, alloclen))) {
55257diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
55258index 45a915c..09f9735 100644
55259--- a/drivers/usb/core/hcd.c
55260+++ b/drivers/usb/core/hcd.c
55261@@ -1551,7 +1551,7 @@ int usb_hcd_submit_urb (struct urb *urb, gfp_t mem_flags)
55262 */
55263 usb_get_urb(urb);
55264 atomic_inc(&urb->use_count);
55265- atomic_inc(&urb->dev->urbnum);
55266+ atomic_inc_unchecked(&urb->dev->urbnum);
55267 usbmon_urb_submit(&hcd->self, urb);
55268
55269 /* NOTE requirements on root-hub callers (usbfs and the hub
55270@@ -1578,7 +1578,7 @@ int usb_hcd_submit_urb (struct urb *urb, gfp_t mem_flags)
55271 urb->hcpriv = NULL;
55272 INIT_LIST_HEAD(&urb->urb_list);
55273 atomic_dec(&urb->use_count);
55274- atomic_dec(&urb->dev->urbnum);
55275+ atomic_dec_unchecked(&urb->dev->urbnum);
55276 if (atomic_read(&urb->reject))
55277 wake_up(&usb_kill_urb_queue);
55278 usb_put_urb(urb);
55279diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
55280index 3b71516..1f26579 100644
55281--- a/drivers/usb/core/hub.c
55282+++ b/drivers/usb/core/hub.c
55283@@ -26,6 +26,7 @@
55284 #include <linux/mutex.h>
55285 #include <linux/random.h>
55286 #include <linux/pm_qos.h>
55287+#include <linux/grsecurity.h>
55288
55289 #include <asm/uaccess.h>
55290 #include <asm/byteorder.h>
55291@@ -4665,6 +4666,10 @@ static void hub_port_connect(struct usb_hub *hub, int port1, u16 portstatus,
55292 goto done;
55293 return;
55294 }
55295+
55296+ if (gr_handle_new_usb())
55297+ goto done;
55298+
55299 if (hub_is_superspeed(hub->hdev))
55300 unit_load = 150;
55301 else
55302diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c
55303index f368d20..0c30ac5 100644
55304--- a/drivers/usb/core/message.c
55305+++ b/drivers/usb/core/message.c
55306@@ -128,7 +128,7 @@ static int usb_internal_control_msg(struct usb_device *usb_dev,
55307 * Return: If successful, the number of bytes transferred. Otherwise, a negative
55308 * error number.
55309 */
55310-int usb_control_msg(struct usb_device *dev, unsigned int pipe, __u8 request,
55311+int __intentional_overflow(-1) usb_control_msg(struct usb_device *dev, unsigned int pipe, __u8 request,
55312 __u8 requesttype, __u16 value, __u16 index, void *data,
55313 __u16 size, int timeout)
55314 {
55315@@ -180,7 +180,7 @@ EXPORT_SYMBOL_GPL(usb_control_msg);
55316 * If successful, 0. Otherwise a negative error number. The number of actual
55317 * bytes transferred will be stored in the @actual_length parameter.
55318 */
55319-int usb_interrupt_msg(struct usb_device *usb_dev, unsigned int pipe,
55320+int __intentional_overflow(-1) usb_interrupt_msg(struct usb_device *usb_dev, unsigned int pipe,
55321 void *data, int len, int *actual_length, int timeout)
55322 {
55323 return usb_bulk_msg(usb_dev, pipe, data, len, actual_length, timeout);
55324@@ -220,7 +220,7 @@ EXPORT_SYMBOL_GPL(usb_interrupt_msg);
55325 * bytes transferred will be stored in the @actual_length parameter.
55326 *
55327 */
55328-int usb_bulk_msg(struct usb_device *usb_dev, unsigned int pipe,
55329+int __intentional_overflow(-1) usb_bulk_msg(struct usb_device *usb_dev, unsigned int pipe,
55330 void *data, int len, int *actual_length, int timeout)
55331 {
55332 struct urb *urb;
55333diff --git a/drivers/usb/core/sysfs.c b/drivers/usb/core/sysfs.c
55334index d269738..7340cd7 100644
55335--- a/drivers/usb/core/sysfs.c
55336+++ b/drivers/usb/core/sysfs.c
55337@@ -244,7 +244,7 @@ static ssize_t urbnum_show(struct device *dev, struct device_attribute *attr,
55338 struct usb_device *udev;
55339
55340 udev = to_usb_device(dev);
55341- return sprintf(buf, "%d\n", atomic_read(&udev->urbnum));
55342+ return sprintf(buf, "%d\n", atomic_read_unchecked(&udev->urbnum));
55343 }
55344 static DEVICE_ATTR_RO(urbnum);
55345
55346diff --git a/drivers/usb/core/usb.c b/drivers/usb/core/usb.c
55347index b1fb9ae..4224885 100644
55348--- a/drivers/usb/core/usb.c
55349+++ b/drivers/usb/core/usb.c
55350@@ -431,7 +431,7 @@ struct usb_device *usb_alloc_dev(struct usb_device *parent,
55351 set_dev_node(&dev->dev, dev_to_node(bus->controller));
55352 dev->state = USB_STATE_ATTACHED;
55353 dev->lpm_disable_count = 1;
55354- atomic_set(&dev->urbnum, 0);
55355+ atomic_set_unchecked(&dev->urbnum, 0);
55356
55357 INIT_LIST_HEAD(&dev->ep0.urb_list);
55358 dev->ep0.desc.bLength = USB_DT_ENDPOINT_SIZE;
55359diff --git a/drivers/usb/early/ehci-dbgp.c b/drivers/usb/early/ehci-dbgp.c
55360index 8cfc319..4868255 100644
55361--- a/drivers/usb/early/ehci-dbgp.c
55362+++ b/drivers/usb/early/ehci-dbgp.c
55363@@ -98,7 +98,8 @@ static inline u32 dbgp_len_update(u32 x, u32 len)
55364
55365 #ifdef CONFIG_KGDB
55366 static struct kgdb_io kgdbdbgp_io_ops;
55367-#define dbgp_kgdb_mode (dbg_io_ops == &kgdbdbgp_io_ops)
55368+static struct kgdb_io kgdbdbgp_io_ops_console;
55369+#define dbgp_kgdb_mode (dbg_io_ops == &kgdbdbgp_io_ops || dbg_io_ops == &kgdbdbgp_io_ops_console)
55370 #else
55371 #define dbgp_kgdb_mode (0)
55372 #endif
55373@@ -1043,6 +1044,13 @@ static struct kgdb_io kgdbdbgp_io_ops = {
55374 .write_char = kgdbdbgp_write_char,
55375 };
55376
55377+static struct kgdb_io kgdbdbgp_io_ops_console = {
55378+ .name = "kgdbdbgp",
55379+ .read_char = kgdbdbgp_read_char,
55380+ .write_char = kgdbdbgp_write_char,
55381+ .is_console = 1
55382+};
55383+
55384 static int kgdbdbgp_wait_time;
55385
55386 static int __init kgdbdbgp_parse_config(char *str)
55387@@ -1058,8 +1066,10 @@ static int __init kgdbdbgp_parse_config(char *str)
55388 ptr++;
55389 kgdbdbgp_wait_time = simple_strtoul(ptr, &ptr, 10);
55390 }
55391- kgdb_register_io_module(&kgdbdbgp_io_ops);
55392- kgdbdbgp_io_ops.is_console = early_dbgp_console.index != -1;
55393+ if (early_dbgp_console.index != -1)
55394+ kgdb_register_io_module(&kgdbdbgp_io_ops_console);
55395+ else
55396+ kgdb_register_io_module(&kgdbdbgp_io_ops);
55397
55398 return 0;
55399 }
55400diff --git a/drivers/usb/gadget/function/f_uac1.c b/drivers/usb/gadget/function/f_uac1.c
55401index 9719abf..789d5d9 100644
55402--- a/drivers/usb/gadget/function/f_uac1.c
55403+++ b/drivers/usb/gadget/function/f_uac1.c
55404@@ -14,6 +14,7 @@
55405 #include <linux/module.h>
55406 #include <linux/device.h>
55407 #include <linux/atomic.h>
55408+#include <linux/module.h>
55409
55410 #include "u_uac1.h"
55411
55412diff --git a/drivers/usb/gadget/function/u_serial.c b/drivers/usb/gadget/function/u_serial.c
55413index 491082a..dfd7d17 100644
55414--- a/drivers/usb/gadget/function/u_serial.c
55415+++ b/drivers/usb/gadget/function/u_serial.c
55416@@ -729,9 +729,9 @@ static int gs_open(struct tty_struct *tty, struct file *file)
55417 spin_lock_irq(&port->port_lock);
55418
55419 /* already open? Great. */
55420- if (port->port.count) {
55421+ if (atomic_read(&port->port.count)) {
55422 status = 0;
55423- port->port.count++;
55424+ atomic_inc(&port->port.count);
55425
55426 /* currently opening/closing? wait ... */
55427 } else if (port->openclose) {
55428@@ -790,7 +790,7 @@ static int gs_open(struct tty_struct *tty, struct file *file)
55429 tty->driver_data = port;
55430 port->port.tty = tty;
55431
55432- port->port.count = 1;
55433+ atomic_set(&port->port.count, 1);
55434 port->openclose = false;
55435
55436 /* if connected, start the I/O stream */
55437@@ -832,11 +832,11 @@ static void gs_close(struct tty_struct *tty, struct file *file)
55438
55439 spin_lock_irq(&port->port_lock);
55440
55441- if (port->port.count != 1) {
55442- if (port->port.count == 0)
55443+ if (atomic_read(&port->port.count) != 1) {
55444+ if (atomic_read(&port->port.count) == 0)
55445 WARN_ON(1);
55446 else
55447- --port->port.count;
55448+ atomic_dec(&port->port.count);
55449 goto exit;
55450 }
55451
55452@@ -846,7 +846,7 @@ static void gs_close(struct tty_struct *tty, struct file *file)
55453 * and sleep if necessary
55454 */
55455 port->openclose = true;
55456- port->port.count = 0;
55457+ atomic_set(&port->port.count, 0);
55458
55459 gser = port->port_usb;
55460 if (gser && gser->disconnect)
55461@@ -1062,7 +1062,7 @@ static int gs_closed(struct gs_port *port)
55462 int cond;
55463
55464 spin_lock_irq(&port->port_lock);
55465- cond = (port->port.count == 0) && !port->openclose;
55466+ cond = (atomic_read(&port->port.count) == 0) && !port->openclose;
55467 spin_unlock_irq(&port->port_lock);
55468 return cond;
55469 }
55470@@ -1205,7 +1205,7 @@ int gserial_connect(struct gserial *gser, u8 port_num)
55471 /* if it's already open, start I/O ... and notify the serial
55472 * protocol about open/close status (connect/disconnect).
55473 */
55474- if (port->port.count) {
55475+ if (atomic_read(&port->port.count)) {
55476 pr_debug("gserial_connect: start ttyGS%d\n", port->port_num);
55477 gs_start_io(port);
55478 if (gser->connect)
55479@@ -1252,7 +1252,7 @@ void gserial_disconnect(struct gserial *gser)
55480
55481 port->port_usb = NULL;
55482 gser->ioport = NULL;
55483- if (port->port.count > 0 || port->openclose) {
55484+ if (atomic_read(&port->port.count) > 0 || port->openclose) {
55485 wake_up_interruptible(&port->drain_wait);
55486 if (port->port.tty)
55487 tty_hangup(port->port.tty);
55488@@ -1268,7 +1268,7 @@ void gserial_disconnect(struct gserial *gser)
55489
55490 /* finally, free any unused/unusable I/O buffers */
55491 spin_lock_irqsave(&port->port_lock, flags);
55492- if (port->port.count == 0 && !port->openclose)
55493+ if (atomic_read(&port->port.count) == 0 && !port->openclose)
55494 gs_buf_free(&port->port_write_buf);
55495 gs_free_requests(gser->out, &port->read_pool, NULL);
55496 gs_free_requests(gser->out, &port->read_queue, NULL);
55497diff --git a/drivers/usb/gadget/function/u_uac1.c b/drivers/usb/gadget/function/u_uac1.c
55498index c78c841..48fd281 100644
55499--- a/drivers/usb/gadget/function/u_uac1.c
55500+++ b/drivers/usb/gadget/function/u_uac1.c
55501@@ -17,6 +17,7 @@
55502 #include <linux/ctype.h>
55503 #include <linux/random.h>
55504 #include <linux/syscalls.h>
55505+#include <linux/module.h>
55506
55507 #include "u_uac1.h"
55508
55509diff --git a/drivers/usb/host/ehci-hub.c b/drivers/usb/host/ehci-hub.c
55510index 7354d01..299478e 100644
55511--- a/drivers/usb/host/ehci-hub.c
55512+++ b/drivers/usb/host/ehci-hub.c
55513@@ -772,7 +772,7 @@ static struct urb *request_single_step_set_feature_urb(
55514 urb->transfer_flags = URB_DIR_IN;
55515 usb_get_urb(urb);
55516 atomic_inc(&urb->use_count);
55517- atomic_inc(&urb->dev->urbnum);
55518+ atomic_inc_unchecked(&urb->dev->urbnum);
55519 urb->setup_dma = dma_map_single(
55520 hcd->self.controller,
55521 urb->setup_packet,
55522@@ -839,7 +839,7 @@ static int ehset_single_step_set_feature(struct usb_hcd *hcd, int port)
55523 urb->status = -EINPROGRESS;
55524 usb_get_urb(urb);
55525 atomic_inc(&urb->use_count);
55526- atomic_inc(&urb->dev->urbnum);
55527+ atomic_inc_unchecked(&urb->dev->urbnum);
55528 retval = submit_single_step_set_feature(hcd, urb, 0);
55529 if (!retval && !wait_for_completion_timeout(&done,
55530 msecs_to_jiffies(2000))) {
55531diff --git a/drivers/usb/host/hwa-hc.c b/drivers/usb/host/hwa-hc.c
55532index 1db0626..4948782 100644
55533--- a/drivers/usb/host/hwa-hc.c
55534+++ b/drivers/usb/host/hwa-hc.c
55535@@ -337,7 +337,10 @@ static int __hwahc_op_bwa_set(struct wusbhc *wusbhc, s8 stream_index,
55536 struct hwahc *hwahc = container_of(wusbhc, struct hwahc, wusbhc);
55537 struct wahc *wa = &hwahc->wa;
55538 struct device *dev = &wa->usb_iface->dev;
55539- u8 mas_le[UWB_NUM_MAS/8];
55540+ u8 *mas_le = kmalloc(UWB_NUM_MAS/8, GFP_KERNEL);
55541+
55542+ if (mas_le == NULL)
55543+ return -ENOMEM;
55544
55545 /* Set the stream index */
55546 result = usb_control_msg(wa->usb_dev, usb_sndctrlpipe(wa->usb_dev, 0),
55547@@ -356,10 +359,12 @@ static int __hwahc_op_bwa_set(struct wusbhc *wusbhc, s8 stream_index,
55548 WUSB_REQ_SET_WUSB_MAS,
55549 USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
55550 0, wa->usb_iface->cur_altsetting->desc.bInterfaceNumber,
55551- mas_le, 32, USB_CTRL_SET_TIMEOUT);
55552+ mas_le, UWB_NUM_MAS/8, USB_CTRL_SET_TIMEOUT);
55553 if (result < 0)
55554 dev_err(dev, "Cannot set WUSB MAS allocation: %d\n", result);
55555 out:
55556+ kfree(mas_le);
55557+
55558 return result;
55559 }
55560
55561diff --git a/drivers/usb/misc/appledisplay.c b/drivers/usb/misc/appledisplay.c
55562index b3d245e..99549ed 100644
55563--- a/drivers/usb/misc/appledisplay.c
55564+++ b/drivers/usb/misc/appledisplay.c
55565@@ -84,7 +84,7 @@ struct appledisplay {
55566 struct mutex sysfslock; /* concurrent read and write */
55567 };
55568
55569-static atomic_t count_displays = ATOMIC_INIT(0);
55570+static atomic_unchecked_t count_displays = ATOMIC_INIT(0);
55571 static struct workqueue_struct *wq;
55572
55573 static void appledisplay_complete(struct urb *urb)
55574@@ -288,7 +288,7 @@ static int appledisplay_probe(struct usb_interface *iface,
55575
55576 /* Register backlight device */
55577 snprintf(bl_name, sizeof(bl_name), "appledisplay%d",
55578- atomic_inc_return(&count_displays) - 1);
55579+ atomic_inc_return_unchecked(&count_displays) - 1);
55580 memset(&props, 0, sizeof(struct backlight_properties));
55581 props.type = BACKLIGHT_RAW;
55582 props.max_brightness = 0xff;
55583diff --git a/drivers/usb/serial/console.c b/drivers/usb/serial/console.c
55584index 3806e70..55c508b 100644
55585--- a/drivers/usb/serial/console.c
55586+++ b/drivers/usb/serial/console.c
55587@@ -126,7 +126,7 @@ static int usb_console_setup(struct console *co, char *options)
55588
55589 info->port = port;
55590
55591- ++port->port.count;
55592+ atomic_inc(&port->port.count);
55593 if (!test_bit(ASYNCB_INITIALIZED, &port->port.flags)) {
55594 if (serial->type->set_termios) {
55595 /*
55596@@ -175,7 +175,7 @@ static int usb_console_setup(struct console *co, char *options)
55597 }
55598 /* Now that any required fake tty operations are completed restore
55599 * the tty port count */
55600- --port->port.count;
55601+ atomic_dec(&port->port.count);
55602 /* The console is special in terms of closing the device so
55603 * indicate this port is now acting as a system console. */
55604 port->port.console = 1;
55605@@ -188,7 +188,7 @@ static int usb_console_setup(struct console *co, char *options)
55606 put_tty:
55607 tty_kref_put(tty);
55608 reset_open_count:
55609- port->port.count = 0;
55610+ atomic_set(&port->port.count, 0);
55611 usb_autopm_put_interface(serial->interface);
55612 error_get_interface:
55613 usb_serial_put(serial);
55614@@ -199,7 +199,7 @@ static int usb_console_setup(struct console *co, char *options)
55615 static void usb_console_write(struct console *co,
55616 const char *buf, unsigned count)
55617 {
55618- static struct usbcons_info *info = &usbcons_info;
55619+ struct usbcons_info *info = &usbcons_info;
55620 struct usb_serial_port *port = info->port;
55621 struct usb_serial *serial;
55622 int retval = -ENODEV;
55623diff --git a/drivers/usb/storage/usb.h b/drivers/usb/storage/usb.h
55624index 307e339..6aa97cb 100644
55625--- a/drivers/usb/storage/usb.h
55626+++ b/drivers/usb/storage/usb.h
55627@@ -63,7 +63,7 @@ struct us_unusual_dev {
55628 __u8 useProtocol;
55629 __u8 useTransport;
55630 int (*initFunction)(struct us_data *);
55631-};
55632+} __do_const;
55633
55634
55635 /* Dynamic bitflag definitions (us->dflags): used in set_bit() etc. */
55636diff --git a/drivers/usb/usbip/vhci.h b/drivers/usb/usbip/vhci.h
55637index a863a98..d272795 100644
55638--- a/drivers/usb/usbip/vhci.h
55639+++ b/drivers/usb/usbip/vhci.h
55640@@ -83,7 +83,7 @@ struct vhci_hcd {
55641 unsigned resuming:1;
55642 unsigned long re_timeout;
55643
55644- atomic_t seqnum;
55645+ atomic_unchecked_t seqnum;
55646
55647 /*
55648 * NOTE:
55649diff --git a/drivers/usb/usbip/vhci_hcd.c b/drivers/usb/usbip/vhci_hcd.c
55650index 11f6f61..1087910 100644
55651--- a/drivers/usb/usbip/vhci_hcd.c
55652+++ b/drivers/usb/usbip/vhci_hcd.c
55653@@ -440,7 +440,7 @@ static void vhci_tx_urb(struct urb *urb)
55654
55655 spin_lock(&vdev->priv_lock);
55656
55657- priv->seqnum = atomic_inc_return(&the_controller->seqnum);
55658+ priv->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
55659 if (priv->seqnum == 0xffff)
55660 dev_info(&urb->dev->dev, "seqnum max\n");
55661
55662@@ -685,7 +685,7 @@ static int vhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
55663 return -ENOMEM;
55664 }
55665
55666- unlink->seqnum = atomic_inc_return(&the_controller->seqnum);
55667+ unlink->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
55668 if (unlink->seqnum == 0xffff)
55669 pr_info("seqnum max\n");
55670
55671@@ -889,7 +889,7 @@ static int vhci_start(struct usb_hcd *hcd)
55672 vdev->rhport = rhport;
55673 }
55674
55675- atomic_set(&vhci->seqnum, 0);
55676+ atomic_set_unchecked(&vhci->seqnum, 0);
55677 spin_lock_init(&vhci->lock);
55678
55679 hcd->power_budget = 0; /* no limit */
55680diff --git a/drivers/usb/usbip/vhci_rx.c b/drivers/usb/usbip/vhci_rx.c
55681index 00e4a54..d676f85 100644
55682--- a/drivers/usb/usbip/vhci_rx.c
55683+++ b/drivers/usb/usbip/vhci_rx.c
55684@@ -80,7 +80,7 @@ static void vhci_recv_ret_submit(struct vhci_device *vdev,
55685 if (!urb) {
55686 pr_err("cannot find a urb of seqnum %u\n", pdu->base.seqnum);
55687 pr_info("max seqnum %d\n",
55688- atomic_read(&the_controller->seqnum));
55689+ atomic_read_unchecked(&the_controller->seqnum));
55690 usbip_event_add(ud, VDEV_EVENT_ERROR_TCP);
55691 return;
55692 }
55693diff --git a/drivers/usb/wusbcore/wa-hc.h b/drivers/usb/wusbcore/wa-hc.h
55694index edc7267..9f65ce2 100644
55695--- a/drivers/usb/wusbcore/wa-hc.h
55696+++ b/drivers/usb/wusbcore/wa-hc.h
55697@@ -240,7 +240,7 @@ struct wahc {
55698 spinlock_t xfer_list_lock;
55699 struct work_struct xfer_enqueue_work;
55700 struct work_struct xfer_error_work;
55701- atomic_t xfer_id_count;
55702+ atomic_unchecked_t xfer_id_count;
55703
55704 kernel_ulong_t quirks;
55705 };
55706@@ -305,7 +305,7 @@ static inline void wa_init(struct wahc *wa)
55707 INIT_WORK(&wa->xfer_enqueue_work, wa_urb_enqueue_run);
55708 INIT_WORK(&wa->xfer_error_work, wa_process_errored_transfers_run);
55709 wa->dto_in_use = 0;
55710- atomic_set(&wa->xfer_id_count, 1);
55711+ atomic_set_unchecked(&wa->xfer_id_count, 1);
55712 /* init the buf in URBs */
55713 for (index = 0; index < WA_MAX_BUF_IN_URBS; ++index)
55714 usb_init_urb(&(wa->buf_in_urbs[index]));
55715diff --git a/drivers/usb/wusbcore/wa-xfer.c b/drivers/usb/wusbcore/wa-xfer.c
55716index 69af4fd..da390d7 100644
55717--- a/drivers/usb/wusbcore/wa-xfer.c
55718+++ b/drivers/usb/wusbcore/wa-xfer.c
55719@@ -314,7 +314,7 @@ static void wa_xfer_completion(struct wa_xfer *xfer)
55720 */
55721 static void wa_xfer_id_init(struct wa_xfer *xfer)
55722 {
55723- xfer->id = atomic_add_return(1, &xfer->wa->xfer_id_count);
55724+ xfer->id = atomic_add_return_unchecked(1, &xfer->wa->xfer_id_count);
55725 }
55726
55727 /* Return the xfer's ID. */
55728diff --git a/drivers/vfio/vfio.c b/drivers/vfio/vfio.c
55729index 837d177..170724af 100644
55730--- a/drivers/vfio/vfio.c
55731+++ b/drivers/vfio/vfio.c
55732@@ -518,7 +518,7 @@ static int vfio_group_nb_add_dev(struct vfio_group *group, struct device *dev)
55733 return 0;
55734
55735 /* TODO Prevent device auto probing */
55736- WARN("Device %s added to live group %d!\n", dev_name(dev),
55737+ WARN(1, "Device %s added to live group %d!\n", dev_name(dev),
55738 iommu_group_id(group->iommu_group));
55739
55740 return 0;
55741diff --git a/drivers/vhost/vringh.c b/drivers/vhost/vringh.c
55742index 3bb02c6..a01ff38 100644
55743--- a/drivers/vhost/vringh.c
55744+++ b/drivers/vhost/vringh.c
55745@@ -551,7 +551,7 @@ static inline void __vringh_notify_disable(struct vringh *vrh,
55746 static inline int getu16_user(const struct vringh *vrh, u16 *val, const __virtio16 *p)
55747 {
55748 __virtio16 v = 0;
55749- int rc = get_user(v, (__force __virtio16 __user *)p);
55750+ int rc = get_user(v, (__force_user __virtio16 *)p);
55751 *val = vringh16_to_cpu(vrh, v);
55752 return rc;
55753 }
55754@@ -559,12 +559,12 @@ static inline int getu16_user(const struct vringh *vrh, u16 *val, const __virtio
55755 static inline int putu16_user(const struct vringh *vrh, __virtio16 *p, u16 val)
55756 {
55757 __virtio16 v = cpu_to_vringh16(vrh, val);
55758- return put_user(v, (__force __virtio16 __user *)p);
55759+ return put_user(v, (__force_user __virtio16 *)p);
55760 }
55761
55762 static inline int copydesc_user(void *dst, const void *src, size_t len)
55763 {
55764- return copy_from_user(dst, (__force void __user *)src, len) ?
55765+ return copy_from_user(dst, (void __force_user *)src, len) ?
55766 -EFAULT : 0;
55767 }
55768
55769@@ -572,19 +572,19 @@ static inline int putused_user(struct vring_used_elem *dst,
55770 const struct vring_used_elem *src,
55771 unsigned int num)
55772 {
55773- return copy_to_user((__force void __user *)dst, src,
55774+ return copy_to_user((void __force_user *)dst, src,
55775 sizeof(*dst) * num) ? -EFAULT : 0;
55776 }
55777
55778 static inline int xfer_from_user(void *src, void *dst, size_t len)
55779 {
55780- return copy_from_user(dst, (__force void __user *)src, len) ?
55781+ return copy_from_user(dst, (void __force_user *)src, len) ?
55782 -EFAULT : 0;
55783 }
55784
55785 static inline int xfer_to_user(void *dst, void *src, size_t len)
55786 {
55787- return copy_to_user((__force void __user *)dst, src, len) ?
55788+ return copy_to_user((void __force_user *)dst, src, len) ?
55789 -EFAULT : 0;
55790 }
55791
55792@@ -621,9 +621,9 @@ int vringh_init_user(struct vringh *vrh, u64 features,
55793 vrh->last_used_idx = 0;
55794 vrh->vring.num = num;
55795 /* vring expects kernel addresses, but only used via accessors. */
55796- vrh->vring.desc = (__force struct vring_desc *)desc;
55797- vrh->vring.avail = (__force struct vring_avail *)avail;
55798- vrh->vring.used = (__force struct vring_used *)used;
55799+ vrh->vring.desc = (__force_kernel struct vring_desc *)desc;
55800+ vrh->vring.avail = (__force_kernel struct vring_avail *)avail;
55801+ vrh->vring.used = (__force_kernel struct vring_used *)used;
55802 return 0;
55803 }
55804 EXPORT_SYMBOL(vringh_init_user);
55805@@ -826,7 +826,7 @@ static inline int getu16_kern(const struct vringh *vrh,
55806
55807 static inline int putu16_kern(const struct vringh *vrh, __virtio16 *p, u16 val)
55808 {
55809- ACCESS_ONCE(*p) = cpu_to_vringh16(vrh, val);
55810+ ACCESS_ONCE_RW(*p) = cpu_to_vringh16(vrh, val);
55811 return 0;
55812 }
55813
55814diff --git a/drivers/video/backlight/kb3886_bl.c b/drivers/video/backlight/kb3886_bl.c
55815index 84a110a..96312c3 100644
55816--- a/drivers/video/backlight/kb3886_bl.c
55817+++ b/drivers/video/backlight/kb3886_bl.c
55818@@ -78,7 +78,7 @@ static struct kb3886bl_machinfo *bl_machinfo;
55819 static unsigned long kb3886bl_flags;
55820 #define KB3886BL_SUSPENDED 0x01
55821
55822-static struct dmi_system_id kb3886bl_device_table[] __initdata = {
55823+static const struct dmi_system_id kb3886bl_device_table[] __initconst = {
55824 {
55825 .ident = "Sahara Touch-iT",
55826 .matches = {
55827diff --git a/drivers/video/fbdev/arcfb.c b/drivers/video/fbdev/arcfb.c
55828index 1b0b233..6f34c2c 100644
55829--- a/drivers/video/fbdev/arcfb.c
55830+++ b/drivers/video/fbdev/arcfb.c
55831@@ -458,7 +458,7 @@ static ssize_t arcfb_write(struct fb_info *info, const char __user *buf,
55832 return -ENOSPC;
55833
55834 err = 0;
55835- if ((count + p) > fbmemlength) {
55836+ if (count > (fbmemlength - p)) {
55837 count = fbmemlength - p;
55838 err = -ENOSPC;
55839 }
55840diff --git a/drivers/video/fbdev/aty/aty128fb.c b/drivers/video/fbdev/aty/aty128fb.c
55841index aedf2fb..47c9aca 100644
55842--- a/drivers/video/fbdev/aty/aty128fb.c
55843+++ b/drivers/video/fbdev/aty/aty128fb.c
55844@@ -149,7 +149,7 @@ enum {
55845 };
55846
55847 /* Must match above enum */
55848-static char * const r128_family[] = {
55849+static const char * const r128_family[] = {
55850 "AGP",
55851 "PCI",
55852 "PRO AGP",
55853diff --git a/drivers/video/fbdev/aty/atyfb_base.c b/drivers/video/fbdev/aty/atyfb_base.c
55854index 8789e48..698fe4c 100644
55855--- a/drivers/video/fbdev/aty/atyfb_base.c
55856+++ b/drivers/video/fbdev/aty/atyfb_base.c
55857@@ -1326,10 +1326,14 @@ static int atyfb_set_par(struct fb_info *info)
55858 par->accel_flags = var->accel_flags; /* hack */
55859
55860 if (var->accel_flags) {
55861- info->fbops->fb_sync = atyfb_sync;
55862+ pax_open_kernel();
55863+ *(void **)&info->fbops->fb_sync = atyfb_sync;
55864+ pax_close_kernel();
55865 info->flags &= ~FBINFO_HWACCEL_DISABLED;
55866 } else {
55867- info->fbops->fb_sync = NULL;
55868+ pax_open_kernel();
55869+ *(void **)&info->fbops->fb_sync = NULL;
55870+ pax_close_kernel();
55871 info->flags |= FBINFO_HWACCEL_DISABLED;
55872 }
55873
55874diff --git a/drivers/video/fbdev/aty/mach64_cursor.c b/drivers/video/fbdev/aty/mach64_cursor.c
55875index 2fa0317..4983f2a 100644
55876--- a/drivers/video/fbdev/aty/mach64_cursor.c
55877+++ b/drivers/video/fbdev/aty/mach64_cursor.c
55878@@ -8,6 +8,7 @@
55879 #include "../core/fb_draw.h"
55880
55881 #include <asm/io.h>
55882+#include <asm/pgtable.h>
55883
55884 #ifdef __sparc__
55885 #include <asm/fbio.h>
55886@@ -218,7 +219,9 @@ int aty_init_cursor(struct fb_info *info)
55887 info->sprite.buf_align = 16; /* and 64 lines tall. */
55888 info->sprite.flags = FB_PIXMAP_IO;
55889
55890- info->fbops->fb_cursor = atyfb_cursor;
55891+ pax_open_kernel();
55892+ *(void **)&info->fbops->fb_cursor = atyfb_cursor;
55893+ pax_close_kernel();
55894
55895 return 0;
55896 }
55897diff --git a/drivers/video/fbdev/core/fb_defio.c b/drivers/video/fbdev/core/fb_defio.c
55898index d6cab1f..112f680 100644
55899--- a/drivers/video/fbdev/core/fb_defio.c
55900+++ b/drivers/video/fbdev/core/fb_defio.c
55901@@ -207,7 +207,9 @@ void fb_deferred_io_init(struct fb_info *info)
55902
55903 BUG_ON(!fbdefio);
55904 mutex_init(&fbdefio->lock);
55905- info->fbops->fb_mmap = fb_deferred_io_mmap;
55906+ pax_open_kernel();
55907+ *(void **)&info->fbops->fb_mmap = fb_deferred_io_mmap;
55908+ pax_close_kernel();
55909 INIT_DELAYED_WORK(&info->deferred_work, fb_deferred_io_work);
55910 INIT_LIST_HEAD(&fbdefio->pagelist);
55911 if (fbdefio->delay == 0) /* set a default of 1 s */
55912@@ -238,7 +240,7 @@ void fb_deferred_io_cleanup(struct fb_info *info)
55913 page->mapping = NULL;
55914 }
55915
55916- info->fbops->fb_mmap = NULL;
55917+ *(void **)&info->fbops->fb_mmap = NULL;
55918 mutex_destroy(&fbdefio->lock);
55919 }
55920 EXPORT_SYMBOL_GPL(fb_deferred_io_cleanup);
55921diff --git a/drivers/video/fbdev/core/fbmem.c b/drivers/video/fbdev/core/fbmem.c
55922index 0705d88..d9429bf 100644
55923--- a/drivers/video/fbdev/core/fbmem.c
55924+++ b/drivers/video/fbdev/core/fbmem.c
55925@@ -1301,7 +1301,7 @@ static int do_fscreeninfo_to_user(struct fb_fix_screeninfo *fix,
55926 __u32 data;
55927 int err;
55928
55929- err = copy_to_user(&fix32->id, &fix->id, sizeof(fix32->id));
55930+ err = copy_to_user(fix32->id, &fix->id, sizeof(fix32->id));
55931
55932 data = (__u32) (unsigned long) fix->smem_start;
55933 err |= put_user(data, &fix32->smem_start);
55934diff --git a/drivers/video/fbdev/hyperv_fb.c b/drivers/video/fbdev/hyperv_fb.c
55935index 4254336..282567e 100644
55936--- a/drivers/video/fbdev/hyperv_fb.c
55937+++ b/drivers/video/fbdev/hyperv_fb.c
55938@@ -240,7 +240,7 @@ static uint screen_fb_size;
55939 static inline int synthvid_send(struct hv_device *hdev,
55940 struct synthvid_msg *msg)
55941 {
55942- static atomic64_t request_id = ATOMIC64_INIT(0);
55943+ static atomic64_unchecked_t request_id = ATOMIC64_INIT(0);
55944 int ret;
55945
55946 msg->pipe_hdr.type = PIPE_MSG_DATA;
55947@@ -248,7 +248,7 @@ static inline int synthvid_send(struct hv_device *hdev,
55948
55949 ret = vmbus_sendpacket(hdev->channel, msg,
55950 msg->vid_hdr.size + sizeof(struct pipe_msg_hdr),
55951- atomic64_inc_return(&request_id),
55952+ atomic64_inc_return_unchecked(&request_id),
55953 VM_PKT_DATA_INBAND, 0);
55954
55955 if (ret)
55956diff --git a/drivers/video/fbdev/i810/i810_accel.c b/drivers/video/fbdev/i810/i810_accel.c
55957index 7672d2e..b56437f 100644
55958--- a/drivers/video/fbdev/i810/i810_accel.c
55959+++ b/drivers/video/fbdev/i810/i810_accel.c
55960@@ -73,6 +73,7 @@ static inline int wait_for_space(struct fb_info *info, u32 space)
55961 }
55962 }
55963 printk("ringbuffer lockup!!!\n");
55964+ printk("head:%u tail:%u iring.size:%u space:%u\n", head, tail, par->iring.size, space);
55965 i810_report_error(mmio);
55966 par->dev_flags |= LOCKUP;
55967 info->pixmap.scan_align = 1;
55968diff --git a/drivers/video/fbdev/matrox/matroxfb_DAC1064.c b/drivers/video/fbdev/matrox/matroxfb_DAC1064.c
55969index a01147f..5d896f8 100644
55970--- a/drivers/video/fbdev/matrox/matroxfb_DAC1064.c
55971+++ b/drivers/video/fbdev/matrox/matroxfb_DAC1064.c
55972@@ -1088,14 +1088,20 @@ static void MGAG100_restore(struct matrox_fb_info *minfo)
55973
55974 #ifdef CONFIG_FB_MATROX_MYSTIQUE
55975 struct matrox_switch matrox_mystique = {
55976- MGA1064_preinit, MGA1064_reset, MGA1064_init, MGA1064_restore,
55977+ .preinit = MGA1064_preinit,
55978+ .reset = MGA1064_reset,
55979+ .init = MGA1064_init,
55980+ .restore = MGA1064_restore,
55981 };
55982 EXPORT_SYMBOL(matrox_mystique);
55983 #endif
55984
55985 #ifdef CONFIG_FB_MATROX_G
55986 struct matrox_switch matrox_G100 = {
55987- MGAG100_preinit, MGAG100_reset, MGAG100_init, MGAG100_restore,
55988+ .preinit = MGAG100_preinit,
55989+ .reset = MGAG100_reset,
55990+ .init = MGAG100_init,
55991+ .restore = MGAG100_restore,
55992 };
55993 EXPORT_SYMBOL(matrox_G100);
55994 #endif
55995diff --git a/drivers/video/fbdev/matrox/matroxfb_Ti3026.c b/drivers/video/fbdev/matrox/matroxfb_Ti3026.c
55996index 195ad7c..09743fc 100644
55997--- a/drivers/video/fbdev/matrox/matroxfb_Ti3026.c
55998+++ b/drivers/video/fbdev/matrox/matroxfb_Ti3026.c
55999@@ -738,7 +738,10 @@ static int Ti3026_preinit(struct matrox_fb_info *minfo)
56000 }
56001
56002 struct matrox_switch matrox_millennium = {
56003- Ti3026_preinit, Ti3026_reset, Ti3026_init, Ti3026_restore
56004+ .preinit = Ti3026_preinit,
56005+ .reset = Ti3026_reset,
56006+ .init = Ti3026_init,
56007+ .restore = Ti3026_restore
56008 };
56009 EXPORT_SYMBOL(matrox_millennium);
56010 #endif
56011diff --git a/drivers/video/fbdev/mb862xx/mb862xxfb_accel.c b/drivers/video/fbdev/mb862xx/mb862xxfb_accel.c
56012index fe92eed..106e085 100644
56013--- a/drivers/video/fbdev/mb862xx/mb862xxfb_accel.c
56014+++ b/drivers/video/fbdev/mb862xx/mb862xxfb_accel.c
56015@@ -312,14 +312,18 @@ void mb862xxfb_init_accel(struct fb_info *info, int xres)
56016 struct mb862xxfb_par *par = info->par;
56017
56018 if (info->var.bits_per_pixel == 32) {
56019- info->fbops->fb_fillrect = cfb_fillrect;
56020- info->fbops->fb_copyarea = cfb_copyarea;
56021- info->fbops->fb_imageblit = cfb_imageblit;
56022+ pax_open_kernel();
56023+ *(void **)&info->fbops->fb_fillrect = cfb_fillrect;
56024+ *(void **)&info->fbops->fb_copyarea = cfb_copyarea;
56025+ *(void **)&info->fbops->fb_imageblit = cfb_imageblit;
56026+ pax_close_kernel();
56027 } else {
56028 outreg(disp, GC_L0EM, 3);
56029- info->fbops->fb_fillrect = mb86290fb_fillrect;
56030- info->fbops->fb_copyarea = mb86290fb_copyarea;
56031- info->fbops->fb_imageblit = mb86290fb_imageblit;
56032+ pax_open_kernel();
56033+ *(void **)&info->fbops->fb_fillrect = mb86290fb_fillrect;
56034+ *(void **)&info->fbops->fb_copyarea = mb86290fb_copyarea;
56035+ *(void **)&info->fbops->fb_imageblit = mb86290fb_imageblit;
56036+ pax_close_kernel();
56037 }
56038 outreg(draw, GDC_REG_DRAW_BASE, 0);
56039 outreg(draw, GDC_REG_MODE_MISC, 0x8000);
56040diff --git a/drivers/video/fbdev/nvidia/nvidia.c b/drivers/video/fbdev/nvidia/nvidia.c
56041index def0412..fed6529 100644
56042--- a/drivers/video/fbdev/nvidia/nvidia.c
56043+++ b/drivers/video/fbdev/nvidia/nvidia.c
56044@@ -669,19 +669,23 @@ static int nvidiafb_set_par(struct fb_info *info)
56045 info->fix.line_length = (info->var.xres_virtual *
56046 info->var.bits_per_pixel) >> 3;
56047 if (info->var.accel_flags) {
56048- info->fbops->fb_imageblit = nvidiafb_imageblit;
56049- info->fbops->fb_fillrect = nvidiafb_fillrect;
56050- info->fbops->fb_copyarea = nvidiafb_copyarea;
56051- info->fbops->fb_sync = nvidiafb_sync;
56052+ pax_open_kernel();
56053+ *(void **)&info->fbops->fb_imageblit = nvidiafb_imageblit;
56054+ *(void **)&info->fbops->fb_fillrect = nvidiafb_fillrect;
56055+ *(void **)&info->fbops->fb_copyarea = nvidiafb_copyarea;
56056+ *(void **)&info->fbops->fb_sync = nvidiafb_sync;
56057+ pax_close_kernel();
56058 info->pixmap.scan_align = 4;
56059 info->flags &= ~FBINFO_HWACCEL_DISABLED;
56060 info->flags |= FBINFO_READS_FAST;
56061 NVResetGraphics(info);
56062 } else {
56063- info->fbops->fb_imageblit = cfb_imageblit;
56064- info->fbops->fb_fillrect = cfb_fillrect;
56065- info->fbops->fb_copyarea = cfb_copyarea;
56066- info->fbops->fb_sync = NULL;
56067+ pax_open_kernel();
56068+ *(void **)&info->fbops->fb_imageblit = cfb_imageblit;
56069+ *(void **)&info->fbops->fb_fillrect = cfb_fillrect;
56070+ *(void **)&info->fbops->fb_copyarea = cfb_copyarea;
56071+ *(void **)&info->fbops->fb_sync = NULL;
56072+ pax_close_kernel();
56073 info->pixmap.scan_align = 1;
56074 info->flags |= FBINFO_HWACCEL_DISABLED;
56075 info->flags &= ~FBINFO_READS_FAST;
56076@@ -1173,8 +1177,11 @@ static int nvidia_set_fbinfo(struct fb_info *info)
56077 info->pixmap.size = 8 * 1024;
56078 info->pixmap.flags = FB_PIXMAP_SYSTEM;
56079
56080- if (!hwcur)
56081- info->fbops->fb_cursor = NULL;
56082+ if (!hwcur) {
56083+ pax_open_kernel();
56084+ *(void **)&info->fbops->fb_cursor = NULL;
56085+ pax_close_kernel();
56086+ }
56087
56088 info->var.accel_flags = (!noaccel);
56089
56090diff --git a/drivers/video/fbdev/omap2/dss/display.c b/drivers/video/fbdev/omap2/dss/display.c
56091index 2412a0d..294215b 100644
56092--- a/drivers/video/fbdev/omap2/dss/display.c
56093+++ b/drivers/video/fbdev/omap2/dss/display.c
56094@@ -161,12 +161,14 @@ int omapdss_register_display(struct omap_dss_device *dssdev)
56095 if (dssdev->name == NULL)
56096 dssdev->name = dssdev->alias;
56097
56098+ pax_open_kernel();
56099 if (drv && drv->get_resolution == NULL)
56100- drv->get_resolution = omapdss_default_get_resolution;
56101+ *(void **)&drv->get_resolution = omapdss_default_get_resolution;
56102 if (drv && drv->get_recommended_bpp == NULL)
56103- drv->get_recommended_bpp = omapdss_default_get_recommended_bpp;
56104+ *(void **)&drv->get_recommended_bpp = omapdss_default_get_recommended_bpp;
56105 if (drv && drv->get_timings == NULL)
56106- drv->get_timings = omapdss_default_get_timings;
56107+ *(void **)&drv->get_timings = omapdss_default_get_timings;
56108+ pax_close_kernel();
56109
56110 mutex_lock(&panel_list_mutex);
56111 list_add_tail(&dssdev->panel_list, &panel_list);
56112diff --git a/drivers/video/fbdev/s1d13xxxfb.c b/drivers/video/fbdev/s1d13xxxfb.c
56113index 83433cb..71e9b98 100644
56114--- a/drivers/video/fbdev/s1d13xxxfb.c
56115+++ b/drivers/video/fbdev/s1d13xxxfb.c
56116@@ -881,8 +881,10 @@ static int s1d13xxxfb_probe(struct platform_device *pdev)
56117
56118 switch(prod_id) {
56119 case S1D13506_PROD_ID: /* activate acceleration */
56120- s1d13xxxfb_fbops.fb_fillrect = s1d13xxxfb_bitblt_solidfill;
56121- s1d13xxxfb_fbops.fb_copyarea = s1d13xxxfb_bitblt_copyarea;
56122+ pax_open_kernel();
56123+ *(void **)&s1d13xxxfb_fbops.fb_fillrect = s1d13xxxfb_bitblt_solidfill;
56124+ *(void **)&s1d13xxxfb_fbops.fb_copyarea = s1d13xxxfb_bitblt_copyarea;
56125+ pax_close_kernel();
56126 info->flags = FBINFO_DEFAULT | FBINFO_HWACCEL_YPAN |
56127 FBINFO_HWACCEL_FILLRECT | FBINFO_HWACCEL_COPYAREA;
56128 break;
56129diff --git a/drivers/video/fbdev/sh_mobile_lcdcfb.c b/drivers/video/fbdev/sh_mobile_lcdcfb.c
56130index d3013cd..95b8285 100644
56131--- a/drivers/video/fbdev/sh_mobile_lcdcfb.c
56132+++ b/drivers/video/fbdev/sh_mobile_lcdcfb.c
56133@@ -439,9 +439,9 @@ static unsigned long lcdc_sys_read_data(void *handle)
56134 }
56135
56136 static struct sh_mobile_lcdc_sys_bus_ops sh_mobile_lcdc_sys_bus_ops = {
56137- lcdc_sys_write_index,
56138- lcdc_sys_write_data,
56139- lcdc_sys_read_data,
56140+ .write_index = lcdc_sys_write_index,
56141+ .write_data = lcdc_sys_write_data,
56142+ .read_data = lcdc_sys_read_data,
56143 };
56144
56145 static int sh_mobile_lcdc_sginit(struct fb_info *info,
56146diff --git a/drivers/video/fbdev/smscufx.c b/drivers/video/fbdev/smscufx.c
56147index 9279e5f..d5f5276 100644
56148--- a/drivers/video/fbdev/smscufx.c
56149+++ b/drivers/video/fbdev/smscufx.c
56150@@ -1174,7 +1174,9 @@ static int ufx_ops_release(struct fb_info *info, int user)
56151 fb_deferred_io_cleanup(info);
56152 kfree(info->fbdefio);
56153 info->fbdefio = NULL;
56154- info->fbops->fb_mmap = ufx_ops_mmap;
56155+ pax_open_kernel();
56156+ *(void **)&info->fbops->fb_mmap = ufx_ops_mmap;
56157+ pax_close_kernel();
56158 }
56159
56160 pr_debug("released /dev/fb%d user=%d count=%d",
56161diff --git a/drivers/video/fbdev/udlfb.c b/drivers/video/fbdev/udlfb.c
56162index ff2b873..626a8d5 100644
56163--- a/drivers/video/fbdev/udlfb.c
56164+++ b/drivers/video/fbdev/udlfb.c
56165@@ -623,11 +623,11 @@ static int dlfb_handle_damage(struct dlfb_data *dev, int x, int y,
56166 dlfb_urb_completion(urb);
56167
56168 error:
56169- atomic_add(bytes_sent, &dev->bytes_sent);
56170- atomic_add(bytes_identical, &dev->bytes_identical);
56171- atomic_add(width*height*2, &dev->bytes_rendered);
56172+ atomic_add_unchecked(bytes_sent, &dev->bytes_sent);
56173+ atomic_add_unchecked(bytes_identical, &dev->bytes_identical);
56174+ atomic_add_unchecked(width*height*2, &dev->bytes_rendered);
56175 end_cycles = get_cycles();
56176- atomic_add(((unsigned int) ((end_cycles - start_cycles)
56177+ atomic_add_unchecked(((unsigned int) ((end_cycles - start_cycles)
56178 >> 10)), /* Kcycles */
56179 &dev->cpu_kcycles_used);
56180
56181@@ -748,11 +748,11 @@ static void dlfb_dpy_deferred_io(struct fb_info *info,
56182 dlfb_urb_completion(urb);
56183
56184 error:
56185- atomic_add(bytes_sent, &dev->bytes_sent);
56186- atomic_add(bytes_identical, &dev->bytes_identical);
56187- atomic_add(bytes_rendered, &dev->bytes_rendered);
56188+ atomic_add_unchecked(bytes_sent, &dev->bytes_sent);
56189+ atomic_add_unchecked(bytes_identical, &dev->bytes_identical);
56190+ atomic_add_unchecked(bytes_rendered, &dev->bytes_rendered);
56191 end_cycles = get_cycles();
56192- atomic_add(((unsigned int) ((end_cycles - start_cycles)
56193+ atomic_add_unchecked(((unsigned int) ((end_cycles - start_cycles)
56194 >> 10)), /* Kcycles */
56195 &dev->cpu_kcycles_used);
56196 }
56197@@ -991,7 +991,9 @@ static int dlfb_ops_release(struct fb_info *info, int user)
56198 fb_deferred_io_cleanup(info);
56199 kfree(info->fbdefio);
56200 info->fbdefio = NULL;
56201- info->fbops->fb_mmap = dlfb_ops_mmap;
56202+ pax_open_kernel();
56203+ *(void **)&info->fbops->fb_mmap = dlfb_ops_mmap;
56204+ pax_close_kernel();
56205 }
56206
56207 pr_warn("released /dev/fb%d user=%d count=%d\n",
56208@@ -1373,7 +1375,7 @@ static ssize_t metrics_bytes_rendered_show(struct device *fbdev,
56209 struct fb_info *fb_info = dev_get_drvdata(fbdev);
56210 struct dlfb_data *dev = fb_info->par;
56211 return snprintf(buf, PAGE_SIZE, "%u\n",
56212- atomic_read(&dev->bytes_rendered));
56213+ atomic_read_unchecked(&dev->bytes_rendered));
56214 }
56215
56216 static ssize_t metrics_bytes_identical_show(struct device *fbdev,
56217@@ -1381,7 +1383,7 @@ static ssize_t metrics_bytes_identical_show(struct device *fbdev,
56218 struct fb_info *fb_info = dev_get_drvdata(fbdev);
56219 struct dlfb_data *dev = fb_info->par;
56220 return snprintf(buf, PAGE_SIZE, "%u\n",
56221- atomic_read(&dev->bytes_identical));
56222+ atomic_read_unchecked(&dev->bytes_identical));
56223 }
56224
56225 static ssize_t metrics_bytes_sent_show(struct device *fbdev,
56226@@ -1389,7 +1391,7 @@ static ssize_t metrics_bytes_sent_show(struct device *fbdev,
56227 struct fb_info *fb_info = dev_get_drvdata(fbdev);
56228 struct dlfb_data *dev = fb_info->par;
56229 return snprintf(buf, PAGE_SIZE, "%u\n",
56230- atomic_read(&dev->bytes_sent));
56231+ atomic_read_unchecked(&dev->bytes_sent));
56232 }
56233
56234 static ssize_t metrics_cpu_kcycles_used_show(struct device *fbdev,
56235@@ -1397,7 +1399,7 @@ static ssize_t metrics_cpu_kcycles_used_show(struct device *fbdev,
56236 struct fb_info *fb_info = dev_get_drvdata(fbdev);
56237 struct dlfb_data *dev = fb_info->par;
56238 return snprintf(buf, PAGE_SIZE, "%u\n",
56239- atomic_read(&dev->cpu_kcycles_used));
56240+ atomic_read_unchecked(&dev->cpu_kcycles_used));
56241 }
56242
56243 static ssize_t edid_show(
56244@@ -1457,10 +1459,10 @@ static ssize_t metrics_reset_store(struct device *fbdev,
56245 struct fb_info *fb_info = dev_get_drvdata(fbdev);
56246 struct dlfb_data *dev = fb_info->par;
56247
56248- atomic_set(&dev->bytes_rendered, 0);
56249- atomic_set(&dev->bytes_identical, 0);
56250- atomic_set(&dev->bytes_sent, 0);
56251- atomic_set(&dev->cpu_kcycles_used, 0);
56252+ atomic_set_unchecked(&dev->bytes_rendered, 0);
56253+ atomic_set_unchecked(&dev->bytes_identical, 0);
56254+ atomic_set_unchecked(&dev->bytes_sent, 0);
56255+ atomic_set_unchecked(&dev->cpu_kcycles_used, 0);
56256
56257 return count;
56258 }
56259diff --git a/drivers/video/fbdev/uvesafb.c b/drivers/video/fbdev/uvesafb.c
56260index d32d1c4..46722e6 100644
56261--- a/drivers/video/fbdev/uvesafb.c
56262+++ b/drivers/video/fbdev/uvesafb.c
56263@@ -19,6 +19,7 @@
56264 #include <linux/io.h>
56265 #include <linux/mutex.h>
56266 #include <linux/slab.h>
56267+#include <linux/moduleloader.h>
56268 #include <video/edid.h>
56269 #include <video/uvesafb.h>
56270 #ifdef CONFIG_X86
56271@@ -565,10 +566,32 @@ static int uvesafb_vbe_getpmi(struct uvesafb_ktask *task,
56272 if ((task->t.regs.eax & 0xffff) != 0x4f || task->t.regs.es < 0xc000) {
56273 par->pmi_setpal = par->ypan = 0;
56274 } else {
56275+
56276+#ifdef CONFIG_PAX_KERNEXEC
56277+#ifdef CONFIG_MODULES
56278+ par->pmi_code = module_alloc_exec((u16)task->t.regs.ecx);
56279+#endif
56280+ if (!par->pmi_code) {
56281+ par->pmi_setpal = par->ypan = 0;
56282+ return 0;
56283+ }
56284+#endif
56285+
56286 par->pmi_base = (u16 *)phys_to_virt(((u32)task->t.regs.es << 4)
56287 + task->t.regs.edi);
56288+
56289+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
56290+ pax_open_kernel();
56291+ memcpy(par->pmi_code, par->pmi_base, (u16)task->t.regs.ecx);
56292+ pax_close_kernel();
56293+
56294+ par->pmi_start = ktva_ktla(par->pmi_code + par->pmi_base[1]);
56295+ par->pmi_pal = ktva_ktla(par->pmi_code + par->pmi_base[2]);
56296+#else
56297 par->pmi_start = (u8 *)par->pmi_base + par->pmi_base[1];
56298 par->pmi_pal = (u8 *)par->pmi_base + par->pmi_base[2];
56299+#endif
56300+
56301 printk(KERN_INFO "uvesafb: protected mode interface info at "
56302 "%04x:%04x\n",
56303 (u16)task->t.regs.es, (u16)task->t.regs.edi);
56304@@ -813,13 +836,14 @@ static int uvesafb_vbe_init(struct fb_info *info)
56305 par->ypan = ypan;
56306
56307 if (par->pmi_setpal || par->ypan) {
56308+#if !defined(CONFIG_MODULES) || !defined(CONFIG_PAX_KERNEXEC)
56309 if (__supported_pte_mask & _PAGE_NX) {
56310 par->pmi_setpal = par->ypan = 0;
56311 printk(KERN_WARNING "uvesafb: NX protection is active, "
56312 "better not use the PMI.\n");
56313- } else {
56314+ } else
56315+#endif
56316 uvesafb_vbe_getpmi(task, par);
56317- }
56318 }
56319 #else
56320 /* The protected mode interface is not available on non-x86. */
56321@@ -1452,8 +1476,11 @@ static void uvesafb_init_info(struct fb_info *info, struct vbe_mode_ib *mode)
56322 info->fix.ywrapstep = (par->ypan > 1) ? 1 : 0;
56323
56324 /* Disable blanking if the user requested so. */
56325- if (!blank)
56326- info->fbops->fb_blank = NULL;
56327+ if (!blank) {
56328+ pax_open_kernel();
56329+ *(void **)&info->fbops->fb_blank = NULL;
56330+ pax_close_kernel();
56331+ }
56332
56333 /*
56334 * Find out how much IO memory is required for the mode with
56335@@ -1524,8 +1551,11 @@ static void uvesafb_init_info(struct fb_info *info, struct vbe_mode_ib *mode)
56336 info->flags = FBINFO_FLAG_DEFAULT |
56337 (par->ypan ? FBINFO_HWACCEL_YPAN : 0);
56338
56339- if (!par->ypan)
56340- info->fbops->fb_pan_display = NULL;
56341+ if (!par->ypan) {
56342+ pax_open_kernel();
56343+ *(void **)&info->fbops->fb_pan_display = NULL;
56344+ pax_close_kernel();
56345+ }
56346 }
56347
56348 static void uvesafb_init_mtrr(struct fb_info *info)
56349@@ -1786,6 +1816,11 @@ out_mode:
56350 out:
56351 kfree(par->vbe_modes);
56352
56353+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
56354+ if (par->pmi_code)
56355+ module_memfree_exec(par->pmi_code);
56356+#endif
56357+
56358 framebuffer_release(info);
56359 return err;
56360 }
56361@@ -1810,6 +1845,11 @@ static int uvesafb_remove(struct platform_device *dev)
56362 kfree(par->vbe_state_orig);
56363 kfree(par->vbe_state_saved);
56364
56365+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
56366+ if (par->pmi_code)
56367+ module_memfree_exec(par->pmi_code);
56368+#endif
56369+
56370 framebuffer_release(info);
56371 }
56372 return 0;
56373diff --git a/drivers/video/fbdev/vesafb.c b/drivers/video/fbdev/vesafb.c
56374index d79a0ac..2d0c3d4 100644
56375--- a/drivers/video/fbdev/vesafb.c
56376+++ b/drivers/video/fbdev/vesafb.c
56377@@ -9,6 +9,7 @@
56378 */
56379
56380 #include <linux/module.h>
56381+#include <linux/moduleloader.h>
56382 #include <linux/kernel.h>
56383 #include <linux/errno.h>
56384 #include <linux/string.h>
56385@@ -52,8 +53,8 @@ static int vram_remap; /* Set amount of memory to be used */
56386 static int vram_total; /* Set total amount of memory */
56387 static int pmi_setpal __read_mostly = 1; /* pmi for palette changes ??? */
56388 static int ypan __read_mostly; /* 0..nothing, 1..ypan, 2..ywrap */
56389-static void (*pmi_start)(void) __read_mostly;
56390-static void (*pmi_pal) (void) __read_mostly;
56391+static void (*pmi_start)(void) __read_only;
56392+static void (*pmi_pal) (void) __read_only;
56393 static int depth __read_mostly;
56394 static int vga_compat __read_mostly;
56395 /* --------------------------------------------------------------------- */
56396@@ -233,6 +234,7 @@ static int vesafb_probe(struct platform_device *dev)
56397 unsigned int size_remap;
56398 unsigned int size_total;
56399 char *option = NULL;
56400+ void *pmi_code = NULL;
56401
56402 /* ignore error return of fb_get_options */
56403 fb_get_options("vesafb", &option);
56404@@ -279,10 +281,6 @@ static int vesafb_probe(struct platform_device *dev)
56405 size_remap = size_total;
56406 vesafb_fix.smem_len = size_remap;
56407
56408-#ifndef __i386__
56409- screen_info.vesapm_seg = 0;
56410-#endif
56411-
56412 if (!request_mem_region(vesafb_fix.smem_start, size_total, "vesafb")) {
56413 printk(KERN_WARNING
56414 "vesafb: cannot reserve video memory at 0x%lx\n",
56415@@ -312,9 +310,21 @@ static int vesafb_probe(struct platform_device *dev)
56416 printk(KERN_INFO "vesafb: mode is %dx%dx%d, linelength=%d, pages=%d\n",
56417 vesafb_defined.xres, vesafb_defined.yres, vesafb_defined.bits_per_pixel, vesafb_fix.line_length, screen_info.pages);
56418
56419+#ifdef __i386__
56420+
56421+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
56422+ pmi_code = module_alloc_exec(screen_info.vesapm_size);
56423+ if (!pmi_code)
56424+#elif !defined(CONFIG_PAX_KERNEXEC)
56425+ if (0)
56426+#endif
56427+
56428+#endif
56429+ screen_info.vesapm_seg = 0;
56430+
56431 if (screen_info.vesapm_seg) {
56432- printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x\n",
56433- screen_info.vesapm_seg,screen_info.vesapm_off);
56434+ printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x %04x bytes\n",
56435+ screen_info.vesapm_seg,screen_info.vesapm_off,screen_info.vesapm_size);
56436 }
56437
56438 if (screen_info.vesapm_seg < 0xc000)
56439@@ -322,9 +332,25 @@ static int vesafb_probe(struct platform_device *dev)
56440
56441 if (ypan || pmi_setpal) {
56442 unsigned short *pmi_base;
56443+
56444 pmi_base = (unsigned short*)phys_to_virt(((unsigned long)screen_info.vesapm_seg << 4) + screen_info.vesapm_off);
56445- pmi_start = (void*)((char*)pmi_base + pmi_base[1]);
56446- pmi_pal = (void*)((char*)pmi_base + pmi_base[2]);
56447+
56448+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
56449+ pax_open_kernel();
56450+ memcpy(pmi_code, pmi_base, screen_info.vesapm_size);
56451+#else
56452+ pmi_code = pmi_base;
56453+#endif
56454+
56455+ pmi_start = (void*)((char*)pmi_code + pmi_base[1]);
56456+ pmi_pal = (void*)((char*)pmi_code + pmi_base[2]);
56457+
56458+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
56459+ pmi_start = ktva_ktla(pmi_start);
56460+ pmi_pal = ktva_ktla(pmi_pal);
56461+ pax_close_kernel();
56462+#endif
56463+
56464 printk(KERN_INFO "vesafb: pmi: set display start = %p, set palette = %p\n",pmi_start,pmi_pal);
56465 if (pmi_base[3]) {
56466 printk(KERN_INFO "vesafb: pmi: ports = ");
56467@@ -477,8 +503,11 @@ static int vesafb_probe(struct platform_device *dev)
56468 info->flags = FBINFO_FLAG_DEFAULT | FBINFO_MISC_FIRMWARE |
56469 (ypan ? FBINFO_HWACCEL_YPAN : 0);
56470
56471- if (!ypan)
56472- info->fbops->fb_pan_display = NULL;
56473+ if (!ypan) {
56474+ pax_open_kernel();
56475+ *(void **)&info->fbops->fb_pan_display = NULL;
56476+ pax_close_kernel();
56477+ }
56478
56479 if (fb_alloc_cmap(&info->cmap, 256, 0) < 0) {
56480 err = -ENOMEM;
56481@@ -492,6 +521,11 @@ static int vesafb_probe(struct platform_device *dev)
56482 fb_info(info, "%s frame buffer device\n", info->fix.id);
56483 return 0;
56484 err:
56485+
56486+#if defined(__i386__) && defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
56487+ module_memfree_exec(pmi_code);
56488+#endif
56489+
56490 if (info->screen_base)
56491 iounmap(info->screen_base);
56492 framebuffer_release(info);
56493diff --git a/drivers/video/fbdev/via/via_clock.h b/drivers/video/fbdev/via/via_clock.h
56494index 88714ae..16c2e11 100644
56495--- a/drivers/video/fbdev/via/via_clock.h
56496+++ b/drivers/video/fbdev/via/via_clock.h
56497@@ -56,7 +56,7 @@ struct via_clock {
56498
56499 void (*set_engine_pll_state)(u8 state);
56500 void (*set_engine_pll)(struct via_pll_config config);
56501-};
56502+} __no_const;
56503
56504
56505 static inline u32 get_pll_internal_frequency(u32 ref_freq,
56506diff --git a/drivers/video/logo/logo_linux_clut224.ppm b/drivers/video/logo/logo_linux_clut224.ppm
56507index 3c14e43..2630570 100644
56508--- a/drivers/video/logo/logo_linux_clut224.ppm
56509+++ b/drivers/video/logo/logo_linux_clut224.ppm
56510@@ -2,1603 +2,1123 @@ P3
56511 # Standard 224-color Linux logo
56512 80 80
56513 255
56514- 0 0 0 0 0 0 0 0 0 0 0 0
56515- 0 0 0 0 0 0 0 0 0 0 0 0
56516- 0 0 0 0 0 0 0 0 0 0 0 0
56517- 0 0 0 0 0 0 0 0 0 0 0 0
56518- 0 0 0 0 0 0 0 0 0 0 0 0
56519- 0 0 0 0 0 0 0 0 0 0 0 0
56520- 0 0 0 0 0 0 0 0 0 0 0 0
56521- 0 0 0 0 0 0 0 0 0 0 0 0
56522- 0 0 0 0 0 0 0 0 0 0 0 0
56523- 6 6 6 6 6 6 10 10 10 10 10 10
56524- 10 10 10 6 6 6 6 6 6 6 6 6
56525- 0 0 0 0 0 0 0 0 0 0 0 0
56526- 0 0 0 0 0 0 0 0 0 0 0 0
56527- 0 0 0 0 0 0 0 0 0 0 0 0
56528- 0 0 0 0 0 0 0 0 0 0 0 0
56529- 0 0 0 0 0 0 0 0 0 0 0 0
56530- 0 0 0 0 0 0 0 0 0 0 0 0
56531- 0 0 0 0 0 0 0 0 0 0 0 0
56532- 0 0 0 0 0 0 0 0 0 0 0 0
56533- 0 0 0 0 0 0 0 0 0 0 0 0
56534- 0 0 0 0 0 0 0 0 0 0 0 0
56535- 0 0 0 0 0 0 0 0 0 0 0 0
56536- 0 0 0 0 0 0 0 0 0 0 0 0
56537- 0 0 0 0 0 0 0 0 0 0 0 0
56538- 0 0 0 0 0 0 0 0 0 0 0 0
56539- 0 0 0 0 0 0 0 0 0 0 0 0
56540- 0 0 0 0 0 0 0 0 0 0 0 0
56541- 0 0 0 0 0 0 0 0 0 0 0 0
56542- 0 0 0 6 6 6 10 10 10 14 14 14
56543- 22 22 22 26 26 26 30 30 30 34 34 34
56544- 30 30 30 30 30 30 26 26 26 18 18 18
56545- 14 14 14 10 10 10 6 6 6 0 0 0
56546- 0 0 0 0 0 0 0 0 0 0 0 0
56547- 0 0 0 0 0 0 0 0 0 0 0 0
56548- 0 0 0 0 0 0 0 0 0 0 0 0
56549- 0 0 0 0 0 0 0 0 0 0 0 0
56550- 0 0 0 0 0 0 0 0 0 0 0 0
56551- 0 0 0 0 0 0 0 0 0 0 0 0
56552- 0 0 0 0 0 0 0 0 0 0 0 0
56553- 0 0 0 0 0 0 0 0 0 0 0 0
56554- 0 0 0 0 0 0 0 0 0 0 0 0
56555- 0 0 0 0 0 1 0 0 1 0 0 0
56556- 0 0 0 0 0 0 0 0 0 0 0 0
56557- 0 0 0 0 0 0 0 0 0 0 0 0
56558- 0 0 0 0 0 0 0 0 0 0 0 0
56559- 0 0 0 0 0 0 0 0 0 0 0 0
56560- 0 0 0 0 0 0 0 0 0 0 0 0
56561- 0 0 0 0 0 0 0 0 0 0 0 0
56562- 6 6 6 14 14 14 26 26 26 42 42 42
56563- 54 54 54 66 66 66 78 78 78 78 78 78
56564- 78 78 78 74 74 74 66 66 66 54 54 54
56565- 42 42 42 26 26 26 18 18 18 10 10 10
56566- 6 6 6 0 0 0 0 0 0 0 0 0
56567- 0 0 0 0 0 0 0 0 0 0 0 0
56568- 0 0 0 0 0 0 0 0 0 0 0 0
56569- 0 0 0 0 0 0 0 0 0 0 0 0
56570- 0 0 0 0 0 0 0 0 0 0 0 0
56571- 0 0 0 0 0 0 0 0 0 0 0 0
56572- 0 0 0 0 0 0 0 0 0 0 0 0
56573- 0 0 0 0 0 0 0 0 0 0 0 0
56574- 0 0 0 0 0 0 0 0 0 0 0 0
56575- 0 0 1 0 0 0 0 0 0 0 0 0
56576- 0 0 0 0 0 0 0 0 0 0 0 0
56577- 0 0 0 0 0 0 0 0 0 0 0 0
56578- 0 0 0 0 0 0 0 0 0 0 0 0
56579- 0 0 0 0 0 0 0 0 0 0 0 0
56580- 0 0 0 0 0 0 0 0 0 0 0 0
56581- 0 0 0 0 0 0 0 0 0 10 10 10
56582- 22 22 22 42 42 42 66 66 66 86 86 86
56583- 66 66 66 38 38 38 38 38 38 22 22 22
56584- 26 26 26 34 34 34 54 54 54 66 66 66
56585- 86 86 86 70 70 70 46 46 46 26 26 26
56586- 14 14 14 6 6 6 0 0 0 0 0 0
56587- 0 0 0 0 0 0 0 0 0 0 0 0
56588- 0 0 0 0 0 0 0 0 0 0 0 0
56589- 0 0 0 0 0 0 0 0 0 0 0 0
56590- 0 0 0 0 0 0 0 0 0 0 0 0
56591- 0 0 0 0 0 0 0 0 0 0 0 0
56592- 0 0 0 0 0 0 0 0 0 0 0 0
56593- 0 0 0 0 0 0 0 0 0 0 0 0
56594- 0 0 0 0 0 0 0 0 0 0 0 0
56595- 0 0 1 0 0 1 0 0 1 0 0 0
56596- 0 0 0 0 0 0 0 0 0 0 0 0
56597- 0 0 0 0 0 0 0 0 0 0 0 0
56598- 0 0 0 0 0 0 0 0 0 0 0 0
56599- 0 0 0 0 0 0 0 0 0 0 0 0
56600- 0 0 0 0 0 0 0 0 0 0 0 0
56601- 0 0 0 0 0 0 10 10 10 26 26 26
56602- 50 50 50 82 82 82 58 58 58 6 6 6
56603- 2 2 6 2 2 6 2 2 6 2 2 6
56604- 2 2 6 2 2 6 2 2 6 2 2 6
56605- 6 6 6 54 54 54 86 86 86 66 66 66
56606- 38 38 38 18 18 18 6 6 6 0 0 0
56607- 0 0 0 0 0 0 0 0 0 0 0 0
56608- 0 0 0 0 0 0 0 0 0 0 0 0
56609- 0 0 0 0 0 0 0 0 0 0 0 0
56610- 0 0 0 0 0 0 0 0 0 0 0 0
56611- 0 0 0 0 0 0 0 0 0 0 0 0
56612- 0 0 0 0 0 0 0 0 0 0 0 0
56613- 0 0 0 0 0 0 0 0 0 0 0 0
56614- 0 0 0 0 0 0 0 0 0 0 0 0
56615- 0 0 0 0 0 0 0 0 0 0 0 0
56616- 0 0 0 0 0 0 0 0 0 0 0 0
56617- 0 0 0 0 0 0 0 0 0 0 0 0
56618- 0 0 0 0 0 0 0 0 0 0 0 0
56619- 0 0 0 0 0 0 0 0 0 0 0 0
56620- 0 0 0 0 0 0 0 0 0 0 0 0
56621- 0 0 0 6 6 6 22 22 22 50 50 50
56622- 78 78 78 34 34 34 2 2 6 2 2 6
56623- 2 2 6 2 2 6 2 2 6 2 2 6
56624- 2 2 6 2 2 6 2 2 6 2 2 6
56625- 2 2 6 2 2 6 6 6 6 70 70 70
56626- 78 78 78 46 46 46 22 22 22 6 6 6
56627- 0 0 0 0 0 0 0 0 0 0 0 0
56628- 0 0 0 0 0 0 0 0 0 0 0 0
56629- 0 0 0 0 0 0 0 0 0 0 0 0
56630- 0 0 0 0 0 0 0 0 0 0 0 0
56631- 0 0 0 0 0 0 0 0 0 0 0 0
56632- 0 0 0 0 0 0 0 0 0 0 0 0
56633- 0 0 0 0 0 0 0 0 0 0 0 0
56634- 0 0 0 0 0 0 0 0 0 0 0 0
56635- 0 0 1 0 0 1 0 0 1 0 0 0
56636- 0 0 0 0 0 0 0 0 0 0 0 0
56637- 0 0 0 0 0 0 0 0 0 0 0 0
56638- 0 0 0 0 0 0 0 0 0 0 0 0
56639- 0 0 0 0 0 0 0 0 0 0 0 0
56640- 0 0 0 0 0 0 0 0 0 0 0 0
56641- 6 6 6 18 18 18 42 42 42 82 82 82
56642- 26 26 26 2 2 6 2 2 6 2 2 6
56643- 2 2 6 2 2 6 2 2 6 2 2 6
56644- 2 2 6 2 2 6 2 2 6 14 14 14
56645- 46 46 46 34 34 34 6 6 6 2 2 6
56646- 42 42 42 78 78 78 42 42 42 18 18 18
56647- 6 6 6 0 0 0 0 0 0 0 0 0
56648- 0 0 0 0 0 0 0 0 0 0 0 0
56649- 0 0 0 0 0 0 0 0 0 0 0 0
56650- 0 0 0 0 0 0 0 0 0 0 0 0
56651- 0 0 0 0 0 0 0 0 0 0 0 0
56652- 0 0 0 0 0 0 0 0 0 0 0 0
56653- 0 0 0 0 0 0 0 0 0 0 0 0
56654- 0 0 0 0 0 0 0 0 0 0 0 0
56655- 0 0 1 0 0 0 0 0 1 0 0 0
56656- 0 0 0 0 0 0 0 0 0 0 0 0
56657- 0 0 0 0 0 0 0 0 0 0 0 0
56658- 0 0 0 0 0 0 0 0 0 0 0 0
56659- 0 0 0 0 0 0 0 0 0 0 0 0
56660- 0 0 0 0 0 0 0 0 0 0 0 0
56661- 10 10 10 30 30 30 66 66 66 58 58 58
56662- 2 2 6 2 2 6 2 2 6 2 2 6
56663- 2 2 6 2 2 6 2 2 6 2 2 6
56664- 2 2 6 2 2 6 2 2 6 26 26 26
56665- 86 86 86 101 101 101 46 46 46 10 10 10
56666- 2 2 6 58 58 58 70 70 70 34 34 34
56667- 10 10 10 0 0 0 0 0 0 0 0 0
56668- 0 0 0 0 0 0 0 0 0 0 0 0
56669- 0 0 0 0 0 0 0 0 0 0 0 0
56670- 0 0 0 0 0 0 0 0 0 0 0 0
56671- 0 0 0 0 0 0 0 0 0 0 0 0
56672- 0 0 0 0 0 0 0 0 0 0 0 0
56673- 0 0 0 0 0 0 0 0 0 0 0 0
56674- 0 0 0 0 0 0 0 0 0 0 0 0
56675- 0 0 1 0 0 1 0 0 1 0 0 0
56676- 0 0 0 0 0 0 0 0 0 0 0 0
56677- 0 0 0 0 0 0 0 0 0 0 0 0
56678- 0 0 0 0 0 0 0 0 0 0 0 0
56679- 0 0 0 0 0 0 0 0 0 0 0 0
56680- 0 0 0 0 0 0 0 0 0 0 0 0
56681- 14 14 14 42 42 42 86 86 86 10 10 10
56682- 2 2 6 2 2 6 2 2 6 2 2 6
56683- 2 2 6 2 2 6 2 2 6 2 2 6
56684- 2 2 6 2 2 6 2 2 6 30 30 30
56685- 94 94 94 94 94 94 58 58 58 26 26 26
56686- 2 2 6 6 6 6 78 78 78 54 54 54
56687- 22 22 22 6 6 6 0 0 0 0 0 0
56688- 0 0 0 0 0 0 0 0 0 0 0 0
56689- 0 0 0 0 0 0 0 0 0 0 0 0
56690- 0 0 0 0 0 0 0 0 0 0 0 0
56691- 0 0 0 0 0 0 0 0 0 0 0 0
56692- 0 0 0 0 0 0 0 0 0 0 0 0
56693- 0 0 0 0 0 0 0 0 0 0 0 0
56694- 0 0 0 0 0 0 0 0 0 0 0 0
56695- 0 0 0 0 0 0 0 0 0 0 0 0
56696- 0 0 0 0 0 0 0 0 0 0 0 0
56697- 0 0 0 0 0 0 0 0 0 0 0 0
56698- 0 0 0 0 0 0 0 0 0 0 0 0
56699- 0 0 0 0 0 0 0 0 0 0 0 0
56700- 0 0 0 0 0 0 0 0 0 6 6 6
56701- 22 22 22 62 62 62 62 62 62 2 2 6
56702- 2 2 6 2 2 6 2 2 6 2 2 6
56703- 2 2 6 2 2 6 2 2 6 2 2 6
56704- 2 2 6 2 2 6 2 2 6 26 26 26
56705- 54 54 54 38 38 38 18 18 18 10 10 10
56706- 2 2 6 2 2 6 34 34 34 82 82 82
56707- 38 38 38 14 14 14 0 0 0 0 0 0
56708- 0 0 0 0 0 0 0 0 0 0 0 0
56709- 0 0 0 0 0 0 0 0 0 0 0 0
56710- 0 0 0 0 0 0 0 0 0 0 0 0
56711- 0 0 0 0 0 0 0 0 0 0 0 0
56712- 0 0 0 0 0 0 0 0 0 0 0 0
56713- 0 0 0 0 0 0 0 0 0 0 0 0
56714- 0 0 0 0 0 0 0 0 0 0 0 0
56715- 0 0 0 0 0 1 0 0 1 0 0 0
56716- 0 0 0 0 0 0 0 0 0 0 0 0
56717- 0 0 0 0 0 0 0 0 0 0 0 0
56718- 0 0 0 0 0 0 0 0 0 0 0 0
56719- 0 0 0 0 0 0 0 0 0 0 0 0
56720- 0 0 0 0 0 0 0 0 0 6 6 6
56721- 30 30 30 78 78 78 30 30 30 2 2 6
56722- 2 2 6 2 2 6 2 2 6 2 2 6
56723- 2 2 6 2 2 6 2 2 6 2 2 6
56724- 2 2 6 2 2 6 2 2 6 10 10 10
56725- 10 10 10 2 2 6 2 2 6 2 2 6
56726- 2 2 6 2 2 6 2 2 6 78 78 78
56727- 50 50 50 18 18 18 6 6 6 0 0 0
56728- 0 0 0 0 0 0 0 0 0 0 0 0
56729- 0 0 0 0 0 0 0 0 0 0 0 0
56730- 0 0 0 0 0 0 0 0 0 0 0 0
56731- 0 0 0 0 0 0 0 0 0 0 0 0
56732- 0 0 0 0 0 0 0 0 0 0 0 0
56733- 0 0 0 0 0 0 0 0 0 0 0 0
56734- 0 0 0 0 0 0 0 0 0 0 0 0
56735- 0 0 1 0 0 0 0 0 0 0 0 0
56736- 0 0 0 0 0 0 0 0 0 0 0 0
56737- 0 0 0 0 0 0 0 0 0 0 0 0
56738- 0 0 0 0 0 0 0 0 0 0 0 0
56739- 0 0 0 0 0 0 0 0 0 0 0 0
56740- 0 0 0 0 0 0 0 0 0 10 10 10
56741- 38 38 38 86 86 86 14 14 14 2 2 6
56742- 2 2 6 2 2 6 2 2 6 2 2 6
56743- 2 2 6 2 2 6 2 2 6 2 2 6
56744- 2 2 6 2 2 6 2 2 6 2 2 6
56745- 2 2 6 2 2 6 2 2 6 2 2 6
56746- 2 2 6 2 2 6 2 2 6 54 54 54
56747- 66 66 66 26 26 26 6 6 6 0 0 0
56748- 0 0 0 0 0 0 0 0 0 0 0 0
56749- 0 0 0 0 0 0 0 0 0 0 0 0
56750- 0 0 0 0 0 0 0 0 0 0 0 0
56751- 0 0 0 0 0 0 0 0 0 0 0 0
56752- 0 0 0 0 0 0 0 0 0 0 0 0
56753- 0 0 0 0 0 0 0 0 0 0 0 0
56754- 0 0 0 0 0 0 0 0 0 0 0 0
56755- 0 0 0 0 0 1 0 0 1 0 0 0
56756- 0 0 0 0 0 0 0 0 0 0 0 0
56757- 0 0 0 0 0 0 0 0 0 0 0 0
56758- 0 0 0 0 0 0 0 0 0 0 0 0
56759- 0 0 0 0 0 0 0 0 0 0 0 0
56760- 0 0 0 0 0 0 0 0 0 14 14 14
56761- 42 42 42 82 82 82 2 2 6 2 2 6
56762- 2 2 6 6 6 6 10 10 10 2 2 6
56763- 2 2 6 2 2 6 2 2 6 2 2 6
56764- 2 2 6 2 2 6 2 2 6 6 6 6
56765- 14 14 14 10 10 10 2 2 6 2 2 6
56766- 2 2 6 2 2 6 2 2 6 18 18 18
56767- 82 82 82 34 34 34 10 10 10 0 0 0
56768- 0 0 0 0 0 0 0 0 0 0 0 0
56769- 0 0 0 0 0 0 0 0 0 0 0 0
56770- 0 0 0 0 0 0 0 0 0 0 0 0
56771- 0 0 0 0 0 0 0 0 0 0 0 0
56772- 0 0 0 0 0 0 0 0 0 0 0 0
56773- 0 0 0 0 0 0 0 0 0 0 0 0
56774- 0 0 0 0 0 0 0 0 0 0 0 0
56775- 0 0 1 0 0 0 0 0 0 0 0 0
56776- 0 0 0 0 0 0 0 0 0 0 0 0
56777- 0 0 0 0 0 0 0 0 0 0 0 0
56778- 0 0 0 0 0 0 0 0 0 0 0 0
56779- 0 0 0 0 0 0 0 0 0 0 0 0
56780- 0 0 0 0 0 0 0 0 0 14 14 14
56781- 46 46 46 86 86 86 2 2 6 2 2 6
56782- 6 6 6 6 6 6 22 22 22 34 34 34
56783- 6 6 6 2 2 6 2 2 6 2 2 6
56784- 2 2 6 2 2 6 18 18 18 34 34 34
56785- 10 10 10 50 50 50 22 22 22 2 2 6
56786- 2 2 6 2 2 6 2 2 6 10 10 10
56787- 86 86 86 42 42 42 14 14 14 0 0 0
56788- 0 0 0 0 0 0 0 0 0 0 0 0
56789- 0 0 0 0 0 0 0 0 0 0 0 0
56790- 0 0 0 0 0 0 0 0 0 0 0 0
56791- 0 0 0 0 0 0 0 0 0 0 0 0
56792- 0 0 0 0 0 0 0 0 0 0 0 0
56793- 0 0 0 0 0 0 0 0 0 0 0 0
56794- 0 0 0 0 0 0 0 0 0 0 0 0
56795- 0 0 1 0 0 1 0 0 1 0 0 0
56796- 0 0 0 0 0 0 0 0 0 0 0 0
56797- 0 0 0 0 0 0 0 0 0 0 0 0
56798- 0 0 0 0 0 0 0 0 0 0 0 0
56799- 0 0 0 0 0 0 0 0 0 0 0 0
56800- 0 0 0 0 0 0 0 0 0 14 14 14
56801- 46 46 46 86 86 86 2 2 6 2 2 6
56802- 38 38 38 116 116 116 94 94 94 22 22 22
56803- 22 22 22 2 2 6 2 2 6 2 2 6
56804- 14 14 14 86 86 86 138 138 138 162 162 162
56805-154 154 154 38 38 38 26 26 26 6 6 6
56806- 2 2 6 2 2 6 2 2 6 2 2 6
56807- 86 86 86 46 46 46 14 14 14 0 0 0
56808- 0 0 0 0 0 0 0 0 0 0 0 0
56809- 0 0 0 0 0 0 0 0 0 0 0 0
56810- 0 0 0 0 0 0 0 0 0 0 0 0
56811- 0 0 0 0 0 0 0 0 0 0 0 0
56812- 0 0 0 0 0 0 0 0 0 0 0 0
56813- 0 0 0 0 0 0 0 0 0 0 0 0
56814- 0 0 0 0 0 0 0 0 0 0 0 0
56815- 0 0 0 0 0 0 0 0 0 0 0 0
56816- 0 0 0 0 0 0 0 0 0 0 0 0
56817- 0 0 0 0 0 0 0 0 0 0 0 0
56818- 0 0 0 0 0 0 0 0 0 0 0 0
56819- 0 0 0 0 0 0 0 0 0 0 0 0
56820- 0 0 0 0 0 0 0 0 0 14 14 14
56821- 46 46 46 86 86 86 2 2 6 14 14 14
56822-134 134 134 198 198 198 195 195 195 116 116 116
56823- 10 10 10 2 2 6 2 2 6 6 6 6
56824-101 98 89 187 187 187 210 210 210 218 218 218
56825-214 214 214 134 134 134 14 14 14 6 6 6
56826- 2 2 6 2 2 6 2 2 6 2 2 6
56827- 86 86 86 50 50 50 18 18 18 6 6 6
56828- 0 0 0 0 0 0 0 0 0 0 0 0
56829- 0 0 0 0 0 0 0 0 0 0 0 0
56830- 0 0 0 0 0 0 0 0 0 0 0 0
56831- 0 0 0 0 0 0 0 0 0 0 0 0
56832- 0 0 0 0 0 0 0 0 0 0 0 0
56833- 0 0 0 0 0 0 0 0 0 0 0 0
56834- 0 0 0 0 0 0 0 0 1 0 0 0
56835- 0 0 1 0 0 1 0 0 1 0 0 0
56836- 0 0 0 0 0 0 0 0 0 0 0 0
56837- 0 0 0 0 0 0 0 0 0 0 0 0
56838- 0 0 0 0 0 0 0 0 0 0 0 0
56839- 0 0 0 0 0 0 0 0 0 0 0 0
56840- 0 0 0 0 0 0 0 0 0 14 14 14
56841- 46 46 46 86 86 86 2 2 6 54 54 54
56842-218 218 218 195 195 195 226 226 226 246 246 246
56843- 58 58 58 2 2 6 2 2 6 30 30 30
56844-210 210 210 253 253 253 174 174 174 123 123 123
56845-221 221 221 234 234 234 74 74 74 2 2 6
56846- 2 2 6 2 2 6 2 2 6 2 2 6
56847- 70 70 70 58 58 58 22 22 22 6 6 6
56848- 0 0 0 0 0 0 0 0 0 0 0 0
56849- 0 0 0 0 0 0 0 0 0 0 0 0
56850- 0 0 0 0 0 0 0 0 0 0 0 0
56851- 0 0 0 0 0 0 0 0 0 0 0 0
56852- 0 0 0 0 0 0 0 0 0 0 0 0
56853- 0 0 0 0 0 0 0 0 0 0 0 0
56854- 0 0 0 0 0 0 0 0 0 0 0 0
56855- 0 0 0 0 0 0 0 0 0 0 0 0
56856- 0 0 0 0 0 0 0 0 0 0 0 0
56857- 0 0 0 0 0 0 0 0 0 0 0 0
56858- 0 0 0 0 0 0 0 0 0 0 0 0
56859- 0 0 0 0 0 0 0 0 0 0 0 0
56860- 0 0 0 0 0 0 0 0 0 14 14 14
56861- 46 46 46 82 82 82 2 2 6 106 106 106
56862-170 170 170 26 26 26 86 86 86 226 226 226
56863-123 123 123 10 10 10 14 14 14 46 46 46
56864-231 231 231 190 190 190 6 6 6 70 70 70
56865- 90 90 90 238 238 238 158 158 158 2 2 6
56866- 2 2 6 2 2 6 2 2 6 2 2 6
56867- 70 70 70 58 58 58 22 22 22 6 6 6
56868- 0 0 0 0 0 0 0 0 0 0 0 0
56869- 0 0 0 0 0 0 0 0 0 0 0 0
56870- 0 0 0 0 0 0 0 0 0 0 0 0
56871- 0 0 0 0 0 0 0 0 0 0 0 0
56872- 0 0 0 0 0 0 0 0 0 0 0 0
56873- 0 0 0 0 0 0 0 0 0 0 0 0
56874- 0 0 0 0 0 0 0 0 1 0 0 0
56875- 0 0 1 0 0 1 0 0 1 0 0 0
56876- 0 0 0 0 0 0 0 0 0 0 0 0
56877- 0 0 0 0 0 0 0 0 0 0 0 0
56878- 0 0 0 0 0 0 0 0 0 0 0 0
56879- 0 0 0 0 0 0 0 0 0 0 0 0
56880- 0 0 0 0 0 0 0 0 0 14 14 14
56881- 42 42 42 86 86 86 6 6 6 116 116 116
56882-106 106 106 6 6 6 70 70 70 149 149 149
56883-128 128 128 18 18 18 38 38 38 54 54 54
56884-221 221 221 106 106 106 2 2 6 14 14 14
56885- 46 46 46 190 190 190 198 198 198 2 2 6
56886- 2 2 6 2 2 6 2 2 6 2 2 6
56887- 74 74 74 62 62 62 22 22 22 6 6 6
56888- 0 0 0 0 0 0 0 0 0 0 0 0
56889- 0 0 0 0 0 0 0 0 0 0 0 0
56890- 0 0 0 0 0 0 0 0 0 0 0 0
56891- 0 0 0 0 0 0 0 0 0 0 0 0
56892- 0 0 0 0 0 0 0 0 0 0 0 0
56893- 0 0 0 0 0 0 0 0 0 0 0 0
56894- 0 0 0 0 0 0 0 0 1 0 0 0
56895- 0 0 1 0 0 0 0 0 1 0 0 0
56896- 0 0 0 0 0 0 0 0 0 0 0 0
56897- 0 0 0 0 0 0 0 0 0 0 0 0
56898- 0 0 0 0 0 0 0 0 0 0 0 0
56899- 0 0 0 0 0 0 0 0 0 0 0 0
56900- 0 0 0 0 0 0 0 0 0 14 14 14
56901- 42 42 42 94 94 94 14 14 14 101 101 101
56902-128 128 128 2 2 6 18 18 18 116 116 116
56903-118 98 46 121 92 8 121 92 8 98 78 10
56904-162 162 162 106 106 106 2 2 6 2 2 6
56905- 2 2 6 195 195 195 195 195 195 6 6 6
56906- 2 2 6 2 2 6 2 2 6 2 2 6
56907- 74 74 74 62 62 62 22 22 22 6 6 6
56908- 0 0 0 0 0 0 0 0 0 0 0 0
56909- 0 0 0 0 0 0 0 0 0 0 0 0
56910- 0 0 0 0 0 0 0 0 0 0 0 0
56911- 0 0 0 0 0 0 0 0 0 0 0 0
56912- 0 0 0 0 0 0 0 0 0 0 0 0
56913- 0 0 0 0 0 0 0 0 0 0 0 0
56914- 0 0 0 0 0 0 0 0 1 0 0 1
56915- 0 0 1 0 0 0 0 0 1 0 0 0
56916- 0 0 0 0 0 0 0 0 0 0 0 0
56917- 0 0 0 0 0 0 0 0 0 0 0 0
56918- 0 0 0 0 0 0 0 0 0 0 0 0
56919- 0 0 0 0 0 0 0 0 0 0 0 0
56920- 0 0 0 0 0 0 0 0 0 10 10 10
56921- 38 38 38 90 90 90 14 14 14 58 58 58
56922-210 210 210 26 26 26 54 38 6 154 114 10
56923-226 170 11 236 186 11 225 175 15 184 144 12
56924-215 174 15 175 146 61 37 26 9 2 2 6
56925- 70 70 70 246 246 246 138 138 138 2 2 6
56926- 2 2 6 2 2 6 2 2 6 2 2 6
56927- 70 70 70 66 66 66 26 26 26 6 6 6
56928- 0 0 0 0 0 0 0 0 0 0 0 0
56929- 0 0 0 0 0 0 0 0 0 0 0 0
56930- 0 0 0 0 0 0 0 0 0 0 0 0
56931- 0 0 0 0 0 0 0 0 0 0 0 0
56932- 0 0 0 0 0 0 0 0 0 0 0 0
56933- 0 0 0 0 0 0 0 0 0 0 0 0
56934- 0 0 0 0 0 0 0 0 0 0 0 0
56935- 0 0 0 0 0 0 0 0 0 0 0 0
56936- 0 0 0 0 0 0 0 0 0 0 0 0
56937- 0 0 0 0 0 0 0 0 0 0 0 0
56938- 0 0 0 0 0 0 0 0 0 0 0 0
56939- 0 0 0 0 0 0 0 0 0 0 0 0
56940- 0 0 0 0 0 0 0 0 0 10 10 10
56941- 38 38 38 86 86 86 14 14 14 10 10 10
56942-195 195 195 188 164 115 192 133 9 225 175 15
56943-239 182 13 234 190 10 232 195 16 232 200 30
56944-245 207 45 241 208 19 232 195 16 184 144 12
56945-218 194 134 211 206 186 42 42 42 2 2 6
56946- 2 2 6 2 2 6 2 2 6 2 2 6
56947- 50 50 50 74 74 74 30 30 30 6 6 6
56948- 0 0 0 0 0 0 0 0 0 0 0 0
56949- 0 0 0 0 0 0 0 0 0 0 0 0
56950- 0 0 0 0 0 0 0 0 0 0 0 0
56951- 0 0 0 0 0 0 0 0 0 0 0 0
56952- 0 0 0 0 0 0 0 0 0 0 0 0
56953- 0 0 0 0 0 0 0 0 0 0 0 0
56954- 0 0 0 0 0 0 0 0 0 0 0 0
56955- 0 0 0 0 0 0 0 0 0 0 0 0
56956- 0 0 0 0 0 0 0 0 0 0 0 0
56957- 0 0 0 0 0 0 0 0 0 0 0 0
56958- 0 0 0 0 0 0 0 0 0 0 0 0
56959- 0 0 0 0 0 0 0 0 0 0 0 0
56960- 0 0 0 0 0 0 0 0 0 10 10 10
56961- 34 34 34 86 86 86 14 14 14 2 2 6
56962-121 87 25 192 133 9 219 162 10 239 182 13
56963-236 186 11 232 195 16 241 208 19 244 214 54
56964-246 218 60 246 218 38 246 215 20 241 208 19
56965-241 208 19 226 184 13 121 87 25 2 2 6
56966- 2 2 6 2 2 6 2 2 6 2 2 6
56967- 50 50 50 82 82 82 34 34 34 10 10 10
56968- 0 0 0 0 0 0 0 0 0 0 0 0
56969- 0 0 0 0 0 0 0 0 0 0 0 0
56970- 0 0 0 0 0 0 0 0 0 0 0 0
56971- 0 0 0 0 0 0 0 0 0 0 0 0
56972- 0 0 0 0 0 0 0 0 0 0 0 0
56973- 0 0 0 0 0 0 0 0 0 0 0 0
56974- 0 0 0 0 0 0 0 0 0 0 0 0
56975- 0 0 0 0 0 0 0 0 0 0 0 0
56976- 0 0 0 0 0 0 0 0 0 0 0 0
56977- 0 0 0 0 0 0 0 0 0 0 0 0
56978- 0 0 0 0 0 0 0 0 0 0 0 0
56979- 0 0 0 0 0 0 0 0 0 0 0 0
56980- 0 0 0 0 0 0 0 0 0 10 10 10
56981- 34 34 34 82 82 82 30 30 30 61 42 6
56982-180 123 7 206 145 10 230 174 11 239 182 13
56983-234 190 10 238 202 15 241 208 19 246 218 74
56984-246 218 38 246 215 20 246 215 20 246 215 20
56985-226 184 13 215 174 15 184 144 12 6 6 6
56986- 2 2 6 2 2 6 2 2 6 2 2 6
56987- 26 26 26 94 94 94 42 42 42 14 14 14
56988- 0 0 0 0 0 0 0 0 0 0 0 0
56989- 0 0 0 0 0 0 0 0 0 0 0 0
56990- 0 0 0 0 0 0 0 0 0 0 0 0
56991- 0 0 0 0 0 0 0 0 0 0 0 0
56992- 0 0 0 0 0 0 0 0 0 0 0 0
56993- 0 0 0 0 0 0 0 0 0 0 0 0
56994- 0 0 0 0 0 0 0 0 0 0 0 0
56995- 0 0 0 0 0 0 0 0 0 0 0 0
56996- 0 0 0 0 0 0 0 0 0 0 0 0
56997- 0 0 0 0 0 0 0 0 0 0 0 0
56998- 0 0 0 0 0 0 0 0 0 0 0 0
56999- 0 0 0 0 0 0 0 0 0 0 0 0
57000- 0 0 0 0 0 0 0 0 0 10 10 10
57001- 30 30 30 78 78 78 50 50 50 104 69 6
57002-192 133 9 216 158 10 236 178 12 236 186 11
57003-232 195 16 241 208 19 244 214 54 245 215 43
57004-246 215 20 246 215 20 241 208 19 198 155 10
57005-200 144 11 216 158 10 156 118 10 2 2 6
57006- 2 2 6 2 2 6 2 2 6 2 2 6
57007- 6 6 6 90 90 90 54 54 54 18 18 18
57008- 6 6 6 0 0 0 0 0 0 0 0 0
57009- 0 0 0 0 0 0 0 0 0 0 0 0
57010- 0 0 0 0 0 0 0 0 0 0 0 0
57011- 0 0 0 0 0 0 0 0 0 0 0 0
57012- 0 0 0 0 0 0 0 0 0 0 0 0
57013- 0 0 0 0 0 0 0 0 0 0 0 0
57014- 0 0 0 0 0 0 0 0 0 0 0 0
57015- 0 0 0 0 0 0 0 0 0 0 0 0
57016- 0 0 0 0 0 0 0 0 0 0 0 0
57017- 0 0 0 0 0 0 0 0 0 0 0 0
57018- 0 0 0 0 0 0 0 0 0 0 0 0
57019- 0 0 0 0 0 0 0 0 0 0 0 0
57020- 0 0 0 0 0 0 0 0 0 10 10 10
57021- 30 30 30 78 78 78 46 46 46 22 22 22
57022-137 92 6 210 162 10 239 182 13 238 190 10
57023-238 202 15 241 208 19 246 215 20 246 215 20
57024-241 208 19 203 166 17 185 133 11 210 150 10
57025-216 158 10 210 150 10 102 78 10 2 2 6
57026- 6 6 6 54 54 54 14 14 14 2 2 6
57027- 2 2 6 62 62 62 74 74 74 30 30 30
57028- 10 10 10 0 0 0 0 0 0 0 0 0
57029- 0 0 0 0 0 0 0 0 0 0 0 0
57030- 0 0 0 0 0 0 0 0 0 0 0 0
57031- 0 0 0 0 0 0 0 0 0 0 0 0
57032- 0 0 0 0 0 0 0 0 0 0 0 0
57033- 0 0 0 0 0 0 0 0 0 0 0 0
57034- 0 0 0 0 0 0 0 0 0 0 0 0
57035- 0 0 0 0 0 0 0 0 0 0 0 0
57036- 0 0 0 0 0 0 0 0 0 0 0 0
57037- 0 0 0 0 0 0 0 0 0 0 0 0
57038- 0 0 0 0 0 0 0 0 0 0 0 0
57039- 0 0 0 0 0 0 0 0 0 0 0 0
57040- 0 0 0 0 0 0 0 0 0 10 10 10
57041- 34 34 34 78 78 78 50 50 50 6 6 6
57042- 94 70 30 139 102 15 190 146 13 226 184 13
57043-232 200 30 232 195 16 215 174 15 190 146 13
57044-168 122 10 192 133 9 210 150 10 213 154 11
57045-202 150 34 182 157 106 101 98 89 2 2 6
57046- 2 2 6 78 78 78 116 116 116 58 58 58
57047- 2 2 6 22 22 22 90 90 90 46 46 46
57048- 18 18 18 6 6 6 0 0 0 0 0 0
57049- 0 0 0 0 0 0 0 0 0 0 0 0
57050- 0 0 0 0 0 0 0 0 0 0 0 0
57051- 0 0 0 0 0 0 0 0 0 0 0 0
57052- 0 0 0 0 0 0 0 0 0 0 0 0
57053- 0 0 0 0 0 0 0 0 0 0 0 0
57054- 0 0 0 0 0 0 0 0 0 0 0 0
57055- 0 0 0 0 0 0 0 0 0 0 0 0
57056- 0 0 0 0 0 0 0 0 0 0 0 0
57057- 0 0 0 0 0 0 0 0 0 0 0 0
57058- 0 0 0 0 0 0 0 0 0 0 0 0
57059- 0 0 0 0 0 0 0 0 0 0 0 0
57060- 0 0 0 0 0 0 0 0 0 10 10 10
57061- 38 38 38 86 86 86 50 50 50 6 6 6
57062-128 128 128 174 154 114 156 107 11 168 122 10
57063-198 155 10 184 144 12 197 138 11 200 144 11
57064-206 145 10 206 145 10 197 138 11 188 164 115
57065-195 195 195 198 198 198 174 174 174 14 14 14
57066- 2 2 6 22 22 22 116 116 116 116 116 116
57067- 22 22 22 2 2 6 74 74 74 70 70 70
57068- 30 30 30 10 10 10 0 0 0 0 0 0
57069- 0 0 0 0 0 0 0 0 0 0 0 0
57070- 0 0 0 0 0 0 0 0 0 0 0 0
57071- 0 0 0 0 0 0 0 0 0 0 0 0
57072- 0 0 0 0 0 0 0 0 0 0 0 0
57073- 0 0 0 0 0 0 0 0 0 0 0 0
57074- 0 0 0 0 0 0 0 0 0 0 0 0
57075- 0 0 0 0 0 0 0 0 0 0 0 0
57076- 0 0 0 0 0 0 0 0 0 0 0 0
57077- 0 0 0 0 0 0 0 0 0 0 0 0
57078- 0 0 0 0 0 0 0 0 0 0 0 0
57079- 0 0 0 0 0 0 0 0 0 0 0 0
57080- 0 0 0 0 0 0 6 6 6 18 18 18
57081- 50 50 50 101 101 101 26 26 26 10 10 10
57082-138 138 138 190 190 190 174 154 114 156 107 11
57083-197 138 11 200 144 11 197 138 11 192 133 9
57084-180 123 7 190 142 34 190 178 144 187 187 187
57085-202 202 202 221 221 221 214 214 214 66 66 66
57086- 2 2 6 2 2 6 50 50 50 62 62 62
57087- 6 6 6 2 2 6 10 10 10 90 90 90
57088- 50 50 50 18 18 18 6 6 6 0 0 0
57089- 0 0 0 0 0 0 0 0 0 0 0 0
57090- 0 0 0 0 0 0 0 0 0 0 0 0
57091- 0 0 0 0 0 0 0 0 0 0 0 0
57092- 0 0 0 0 0 0 0 0 0 0 0 0
57093- 0 0 0 0 0 0 0 0 0 0 0 0
57094- 0 0 0 0 0 0 0 0 0 0 0 0
57095- 0 0 0 0 0 0 0 0 0 0 0 0
57096- 0 0 0 0 0 0 0 0 0 0 0 0
57097- 0 0 0 0 0 0 0 0 0 0 0 0
57098- 0 0 0 0 0 0 0 0 0 0 0 0
57099- 0 0 0 0 0 0 0 0 0 0 0 0
57100- 0 0 0 0 0 0 10 10 10 34 34 34
57101- 74 74 74 74 74 74 2 2 6 6 6 6
57102-144 144 144 198 198 198 190 190 190 178 166 146
57103-154 121 60 156 107 11 156 107 11 168 124 44
57104-174 154 114 187 187 187 190 190 190 210 210 210
57105-246 246 246 253 253 253 253 253 253 182 182 182
57106- 6 6 6 2 2 6 2 2 6 2 2 6
57107- 2 2 6 2 2 6 2 2 6 62 62 62
57108- 74 74 74 34 34 34 14 14 14 0 0 0
57109- 0 0 0 0 0 0 0 0 0 0 0 0
57110- 0 0 0 0 0 0 0 0 0 0 0 0
57111- 0 0 0 0 0 0 0 0 0 0 0 0
57112- 0 0 0 0 0 0 0 0 0 0 0 0
57113- 0 0 0 0 0 0 0 0 0 0 0 0
57114- 0 0 0 0 0 0 0 0 0 0 0 0
57115- 0 0 0 0 0 0 0 0 0 0 0 0
57116- 0 0 0 0 0 0 0 0 0 0 0 0
57117- 0 0 0 0 0 0 0 0 0 0 0 0
57118- 0 0 0 0 0 0 0 0 0 0 0 0
57119- 0 0 0 0 0 0 0 0 0 0 0 0
57120- 0 0 0 10 10 10 22 22 22 54 54 54
57121- 94 94 94 18 18 18 2 2 6 46 46 46
57122-234 234 234 221 221 221 190 190 190 190 190 190
57123-190 190 190 187 187 187 187 187 187 190 190 190
57124-190 190 190 195 195 195 214 214 214 242 242 242
57125-253 253 253 253 253 253 253 253 253 253 253 253
57126- 82 82 82 2 2 6 2 2 6 2 2 6
57127- 2 2 6 2 2 6 2 2 6 14 14 14
57128- 86 86 86 54 54 54 22 22 22 6 6 6
57129- 0 0 0 0 0 0 0 0 0 0 0 0
57130- 0 0 0 0 0 0 0 0 0 0 0 0
57131- 0 0 0 0 0 0 0 0 0 0 0 0
57132- 0 0 0 0 0 0 0 0 0 0 0 0
57133- 0 0 0 0 0 0 0 0 0 0 0 0
57134- 0 0 0 0 0 0 0 0 0 0 0 0
57135- 0 0 0 0 0 0 0 0 0 0 0 0
57136- 0 0 0 0 0 0 0 0 0 0 0 0
57137- 0 0 0 0 0 0 0 0 0 0 0 0
57138- 0 0 0 0 0 0 0 0 0 0 0 0
57139- 0 0 0 0 0 0 0 0 0 0 0 0
57140- 6 6 6 18 18 18 46 46 46 90 90 90
57141- 46 46 46 18 18 18 6 6 6 182 182 182
57142-253 253 253 246 246 246 206 206 206 190 190 190
57143-190 190 190 190 190 190 190 190 190 190 190 190
57144-206 206 206 231 231 231 250 250 250 253 253 253
57145-253 253 253 253 253 253 253 253 253 253 253 253
57146-202 202 202 14 14 14 2 2 6 2 2 6
57147- 2 2 6 2 2 6 2 2 6 2 2 6
57148- 42 42 42 86 86 86 42 42 42 18 18 18
57149- 6 6 6 0 0 0 0 0 0 0 0 0
57150- 0 0 0 0 0 0 0 0 0 0 0 0
57151- 0 0 0 0 0 0 0 0 0 0 0 0
57152- 0 0 0 0 0 0 0 0 0 0 0 0
57153- 0 0 0 0 0 0 0 0 0 0 0 0
57154- 0 0 0 0 0 0 0 0 0 0 0 0
57155- 0 0 0 0 0 0 0 0 0 0 0 0
57156- 0 0 0 0 0 0 0 0 0 0 0 0
57157- 0 0 0 0 0 0 0 0 0 0 0 0
57158- 0 0 0 0 0 0 0 0 0 0 0 0
57159- 0 0 0 0 0 0 0 0 0 6 6 6
57160- 14 14 14 38 38 38 74 74 74 66 66 66
57161- 2 2 6 6 6 6 90 90 90 250 250 250
57162-253 253 253 253 253 253 238 238 238 198 198 198
57163-190 190 190 190 190 190 195 195 195 221 221 221
57164-246 246 246 253 253 253 253 253 253 253 253 253
57165-253 253 253 253 253 253 253 253 253 253 253 253
57166-253 253 253 82 82 82 2 2 6 2 2 6
57167- 2 2 6 2 2 6 2 2 6 2 2 6
57168- 2 2 6 78 78 78 70 70 70 34 34 34
57169- 14 14 14 6 6 6 0 0 0 0 0 0
57170- 0 0 0 0 0 0 0 0 0 0 0 0
57171- 0 0 0 0 0 0 0 0 0 0 0 0
57172- 0 0 0 0 0 0 0 0 0 0 0 0
57173- 0 0 0 0 0 0 0 0 0 0 0 0
57174- 0 0 0 0 0 0 0 0 0 0 0 0
57175- 0 0 0 0 0 0 0 0 0 0 0 0
57176- 0 0 0 0 0 0 0 0 0 0 0 0
57177- 0 0 0 0 0 0 0 0 0 0 0 0
57178- 0 0 0 0 0 0 0 0 0 0 0 0
57179- 0 0 0 0 0 0 0 0 0 14 14 14
57180- 34 34 34 66 66 66 78 78 78 6 6 6
57181- 2 2 6 18 18 18 218 218 218 253 253 253
57182-253 253 253 253 253 253 253 253 253 246 246 246
57183-226 226 226 231 231 231 246 246 246 253 253 253
57184-253 253 253 253 253 253 253 253 253 253 253 253
57185-253 253 253 253 253 253 253 253 253 253 253 253
57186-253 253 253 178 178 178 2 2 6 2 2 6
57187- 2 2 6 2 2 6 2 2 6 2 2 6
57188- 2 2 6 18 18 18 90 90 90 62 62 62
57189- 30 30 30 10 10 10 0 0 0 0 0 0
57190- 0 0 0 0 0 0 0 0 0 0 0 0
57191- 0 0 0 0 0 0 0 0 0 0 0 0
57192- 0 0 0 0 0 0 0 0 0 0 0 0
57193- 0 0 0 0 0 0 0 0 0 0 0 0
57194- 0 0 0 0 0 0 0 0 0 0 0 0
57195- 0 0 0 0 0 0 0 0 0 0 0 0
57196- 0 0 0 0 0 0 0 0 0 0 0 0
57197- 0 0 0 0 0 0 0 0 0 0 0 0
57198- 0 0 0 0 0 0 0 0 0 0 0 0
57199- 0 0 0 0 0 0 10 10 10 26 26 26
57200- 58 58 58 90 90 90 18 18 18 2 2 6
57201- 2 2 6 110 110 110 253 253 253 253 253 253
57202-253 253 253 253 253 253 253 253 253 253 253 253
57203-250 250 250 253 253 253 253 253 253 253 253 253
57204-253 253 253 253 253 253 253 253 253 253 253 253
57205-253 253 253 253 253 253 253 253 253 253 253 253
57206-253 253 253 231 231 231 18 18 18 2 2 6
57207- 2 2 6 2 2 6 2 2 6 2 2 6
57208- 2 2 6 2 2 6 18 18 18 94 94 94
57209- 54 54 54 26 26 26 10 10 10 0 0 0
57210- 0 0 0 0 0 0 0 0 0 0 0 0
57211- 0 0 0 0 0 0 0 0 0 0 0 0
57212- 0 0 0 0 0 0 0 0 0 0 0 0
57213- 0 0 0 0 0 0 0 0 0 0 0 0
57214- 0 0 0 0 0 0 0 0 0 0 0 0
57215- 0 0 0 0 0 0 0 0 0 0 0 0
57216- 0 0 0 0 0 0 0 0 0 0 0 0
57217- 0 0 0 0 0 0 0 0 0 0 0 0
57218- 0 0 0 0 0 0 0 0 0 0 0 0
57219- 0 0 0 6 6 6 22 22 22 50 50 50
57220- 90 90 90 26 26 26 2 2 6 2 2 6
57221- 14 14 14 195 195 195 250 250 250 253 253 253
57222-253 253 253 253 253 253 253 253 253 253 253 253
57223-253 253 253 253 253 253 253 253 253 253 253 253
57224-253 253 253 253 253 253 253 253 253 253 253 253
57225-253 253 253 253 253 253 253 253 253 253 253 253
57226-250 250 250 242 242 242 54 54 54 2 2 6
57227- 2 2 6 2 2 6 2 2 6 2 2 6
57228- 2 2 6 2 2 6 2 2 6 38 38 38
57229- 86 86 86 50 50 50 22 22 22 6 6 6
57230- 0 0 0 0 0 0 0 0 0 0 0 0
57231- 0 0 0 0 0 0 0 0 0 0 0 0
57232- 0 0 0 0 0 0 0 0 0 0 0 0
57233- 0 0 0 0 0 0 0 0 0 0 0 0
57234- 0 0 0 0 0 0 0 0 0 0 0 0
57235- 0 0 0 0 0 0 0 0 0 0 0 0
57236- 0 0 0 0 0 0 0 0 0 0 0 0
57237- 0 0 0 0 0 0 0 0 0 0 0 0
57238- 0 0 0 0 0 0 0 0 0 0 0 0
57239- 6 6 6 14 14 14 38 38 38 82 82 82
57240- 34 34 34 2 2 6 2 2 6 2 2 6
57241- 42 42 42 195 195 195 246 246 246 253 253 253
57242-253 253 253 253 253 253 253 253 253 250 250 250
57243-242 242 242 242 242 242 250 250 250 253 253 253
57244-253 253 253 253 253 253 253 253 253 253 253 253
57245-253 253 253 250 250 250 246 246 246 238 238 238
57246-226 226 226 231 231 231 101 101 101 6 6 6
57247- 2 2 6 2 2 6 2 2 6 2 2 6
57248- 2 2 6 2 2 6 2 2 6 2 2 6
57249- 38 38 38 82 82 82 42 42 42 14 14 14
57250- 6 6 6 0 0 0 0 0 0 0 0 0
57251- 0 0 0 0 0 0 0 0 0 0 0 0
57252- 0 0 0 0 0 0 0 0 0 0 0 0
57253- 0 0 0 0 0 0 0 0 0 0 0 0
57254- 0 0 0 0 0 0 0 0 0 0 0 0
57255- 0 0 0 0 0 0 0 0 0 0 0 0
57256- 0 0 0 0 0 0 0 0 0 0 0 0
57257- 0 0 0 0 0 0 0 0 0 0 0 0
57258- 0 0 0 0 0 0 0 0 0 0 0 0
57259- 10 10 10 26 26 26 62 62 62 66 66 66
57260- 2 2 6 2 2 6 2 2 6 6 6 6
57261- 70 70 70 170 170 170 206 206 206 234 234 234
57262-246 246 246 250 250 250 250 250 250 238 238 238
57263-226 226 226 231 231 231 238 238 238 250 250 250
57264-250 250 250 250 250 250 246 246 246 231 231 231
57265-214 214 214 206 206 206 202 202 202 202 202 202
57266-198 198 198 202 202 202 182 182 182 18 18 18
57267- 2 2 6 2 2 6 2 2 6 2 2 6
57268- 2 2 6 2 2 6 2 2 6 2 2 6
57269- 2 2 6 62 62 62 66 66 66 30 30 30
57270- 10 10 10 0 0 0 0 0 0 0 0 0
57271- 0 0 0 0 0 0 0 0 0 0 0 0
57272- 0 0 0 0 0 0 0 0 0 0 0 0
57273- 0 0 0 0 0 0 0 0 0 0 0 0
57274- 0 0 0 0 0 0 0 0 0 0 0 0
57275- 0 0 0 0 0 0 0 0 0 0 0 0
57276- 0 0 0 0 0 0 0 0 0 0 0 0
57277- 0 0 0 0 0 0 0 0 0 0 0 0
57278- 0 0 0 0 0 0 0 0 0 0 0 0
57279- 14 14 14 42 42 42 82 82 82 18 18 18
57280- 2 2 6 2 2 6 2 2 6 10 10 10
57281- 94 94 94 182 182 182 218 218 218 242 242 242
57282-250 250 250 253 253 253 253 253 253 250 250 250
57283-234 234 234 253 253 253 253 253 253 253 253 253
57284-253 253 253 253 253 253 253 253 253 246 246 246
57285-238 238 238 226 226 226 210 210 210 202 202 202
57286-195 195 195 195 195 195 210 210 210 158 158 158
57287- 6 6 6 14 14 14 50 50 50 14 14 14
57288- 2 2 6 2 2 6 2 2 6 2 2 6
57289- 2 2 6 6 6 6 86 86 86 46 46 46
57290- 18 18 18 6 6 6 0 0 0 0 0 0
57291- 0 0 0 0 0 0 0 0 0 0 0 0
57292- 0 0 0 0 0 0 0 0 0 0 0 0
57293- 0 0 0 0 0 0 0 0 0 0 0 0
57294- 0 0 0 0 0 0 0 0 0 0 0 0
57295- 0 0 0 0 0 0 0 0 0 0 0 0
57296- 0 0 0 0 0 0 0 0 0 0 0 0
57297- 0 0 0 0 0 0 0 0 0 0 0 0
57298- 0 0 0 0 0 0 0 0 0 6 6 6
57299- 22 22 22 54 54 54 70 70 70 2 2 6
57300- 2 2 6 10 10 10 2 2 6 22 22 22
57301-166 166 166 231 231 231 250 250 250 253 253 253
57302-253 253 253 253 253 253 253 253 253 250 250 250
57303-242 242 242 253 253 253 253 253 253 253 253 253
57304-253 253 253 253 253 253 253 253 253 253 253 253
57305-253 253 253 253 253 253 253 253 253 246 246 246
57306-231 231 231 206 206 206 198 198 198 226 226 226
57307- 94 94 94 2 2 6 6 6 6 38 38 38
57308- 30 30 30 2 2 6 2 2 6 2 2 6
57309- 2 2 6 2 2 6 62 62 62 66 66 66
57310- 26 26 26 10 10 10 0 0 0 0 0 0
57311- 0 0 0 0 0 0 0 0 0 0 0 0
57312- 0 0 0 0 0 0 0 0 0 0 0 0
57313- 0 0 0 0 0 0 0 0 0 0 0 0
57314- 0 0 0 0 0 0 0 0 0 0 0 0
57315- 0 0 0 0 0 0 0 0 0 0 0 0
57316- 0 0 0 0 0 0 0 0 0 0 0 0
57317- 0 0 0 0 0 0 0 0 0 0 0 0
57318- 0 0 0 0 0 0 0 0 0 10 10 10
57319- 30 30 30 74 74 74 50 50 50 2 2 6
57320- 26 26 26 26 26 26 2 2 6 106 106 106
57321-238 238 238 253 253 253 253 253 253 253 253 253
57322-253 253 253 253 253 253 253 253 253 253 253 253
57323-253 253 253 253 253 253 253 253 253 253 253 253
57324-253 253 253 253 253 253 253 253 253 253 253 253
57325-253 253 253 253 253 253 253 253 253 253 253 253
57326-253 253 253 246 246 246 218 218 218 202 202 202
57327-210 210 210 14 14 14 2 2 6 2 2 6
57328- 30 30 30 22 22 22 2 2 6 2 2 6
57329- 2 2 6 2 2 6 18 18 18 86 86 86
57330- 42 42 42 14 14 14 0 0 0 0 0 0
57331- 0 0 0 0 0 0 0 0 0 0 0 0
57332- 0 0 0 0 0 0 0 0 0 0 0 0
57333- 0 0 0 0 0 0 0 0 0 0 0 0
57334- 0 0 0 0 0 0 0 0 0 0 0 0
57335- 0 0 0 0 0 0 0 0 0 0 0 0
57336- 0 0 0 0 0 0 0 0 0 0 0 0
57337- 0 0 0 0 0 0 0 0 0 0 0 0
57338- 0 0 0 0 0 0 0 0 0 14 14 14
57339- 42 42 42 90 90 90 22 22 22 2 2 6
57340- 42 42 42 2 2 6 18 18 18 218 218 218
57341-253 253 253 253 253 253 253 253 253 253 253 253
57342-253 253 253 253 253 253 253 253 253 253 253 253
57343-253 253 253 253 253 253 253 253 253 253 253 253
57344-253 253 253 253 253 253 253 253 253 253 253 253
57345-253 253 253 253 253 253 253 253 253 253 253 253
57346-253 253 253 253 253 253 250 250 250 221 221 221
57347-218 218 218 101 101 101 2 2 6 14 14 14
57348- 18 18 18 38 38 38 10 10 10 2 2 6
57349- 2 2 6 2 2 6 2 2 6 78 78 78
57350- 58 58 58 22 22 22 6 6 6 0 0 0
57351- 0 0 0 0 0 0 0 0 0 0 0 0
57352- 0 0 0 0 0 0 0 0 0 0 0 0
57353- 0 0 0 0 0 0 0 0 0 0 0 0
57354- 0 0 0 0 0 0 0 0 0 0 0 0
57355- 0 0 0 0 0 0 0 0 0 0 0 0
57356- 0 0 0 0 0 0 0 0 0 0 0 0
57357- 0 0 0 0 0 0 0 0 0 0 0 0
57358- 0 0 0 0 0 0 6 6 6 18 18 18
57359- 54 54 54 82 82 82 2 2 6 26 26 26
57360- 22 22 22 2 2 6 123 123 123 253 253 253
57361-253 253 253 253 253 253 253 253 253 253 253 253
57362-253 253 253 253 253 253 253 253 253 253 253 253
57363-253 253 253 253 253 253 253 253 253 253 253 253
57364-253 253 253 253 253 253 253 253 253 253 253 253
57365-253 253 253 253 253 253 253 253 253 253 253 253
57366-253 253 253 253 253 253 253 253 253 250 250 250
57367-238 238 238 198 198 198 6 6 6 38 38 38
57368- 58 58 58 26 26 26 38 38 38 2 2 6
57369- 2 2 6 2 2 6 2 2 6 46 46 46
57370- 78 78 78 30 30 30 10 10 10 0 0 0
57371- 0 0 0 0 0 0 0 0 0 0 0 0
57372- 0 0 0 0 0 0 0 0 0 0 0 0
57373- 0 0 0 0 0 0 0 0 0 0 0 0
57374- 0 0 0 0 0 0 0 0 0 0 0 0
57375- 0 0 0 0 0 0 0 0 0 0 0 0
57376- 0 0 0 0 0 0 0 0 0 0 0 0
57377- 0 0 0 0 0 0 0 0 0 0 0 0
57378- 0 0 0 0 0 0 10 10 10 30 30 30
57379- 74 74 74 58 58 58 2 2 6 42 42 42
57380- 2 2 6 22 22 22 231 231 231 253 253 253
57381-253 253 253 253 253 253 253 253 253 253 253 253
57382-253 253 253 253 253 253 253 253 253 250 250 250
57383-253 253 253 253 253 253 253 253 253 253 253 253
57384-253 253 253 253 253 253 253 253 253 253 253 253
57385-253 253 253 253 253 253 253 253 253 253 253 253
57386-253 253 253 253 253 253 253 253 253 253 253 253
57387-253 253 253 246 246 246 46 46 46 38 38 38
57388- 42 42 42 14 14 14 38 38 38 14 14 14
57389- 2 2 6 2 2 6 2 2 6 6 6 6
57390- 86 86 86 46 46 46 14 14 14 0 0 0
57391- 0 0 0 0 0 0 0 0 0 0 0 0
57392- 0 0 0 0 0 0 0 0 0 0 0 0
57393- 0 0 0 0 0 0 0 0 0 0 0 0
57394- 0 0 0 0 0 0 0 0 0 0 0 0
57395- 0 0 0 0 0 0 0 0 0 0 0 0
57396- 0 0 0 0 0 0 0 0 0 0 0 0
57397- 0 0 0 0 0 0 0 0 0 0 0 0
57398- 0 0 0 6 6 6 14 14 14 42 42 42
57399- 90 90 90 18 18 18 18 18 18 26 26 26
57400- 2 2 6 116 116 116 253 253 253 253 253 253
57401-253 253 253 253 253 253 253 253 253 253 253 253
57402-253 253 253 253 253 253 250 250 250 238 238 238
57403-253 253 253 253 253 253 253 253 253 253 253 253
57404-253 253 253 253 253 253 253 253 253 253 253 253
57405-253 253 253 253 253 253 253 253 253 253 253 253
57406-253 253 253 253 253 253 253 253 253 253 253 253
57407-253 253 253 253 253 253 94 94 94 6 6 6
57408- 2 2 6 2 2 6 10 10 10 34 34 34
57409- 2 2 6 2 2 6 2 2 6 2 2 6
57410- 74 74 74 58 58 58 22 22 22 6 6 6
57411- 0 0 0 0 0 0 0 0 0 0 0 0
57412- 0 0 0 0 0 0 0 0 0 0 0 0
57413- 0 0 0 0 0 0 0 0 0 0 0 0
57414- 0 0 0 0 0 0 0 0 0 0 0 0
57415- 0 0 0 0 0 0 0 0 0 0 0 0
57416- 0 0 0 0 0 0 0 0 0 0 0 0
57417- 0 0 0 0 0 0 0 0 0 0 0 0
57418- 0 0 0 10 10 10 26 26 26 66 66 66
57419- 82 82 82 2 2 6 38 38 38 6 6 6
57420- 14 14 14 210 210 210 253 253 253 253 253 253
57421-253 253 253 253 253 253 253 253 253 253 253 253
57422-253 253 253 253 253 253 246 246 246 242 242 242
57423-253 253 253 253 253 253 253 253 253 253 253 253
57424-253 253 253 253 253 253 253 253 253 253 253 253
57425-253 253 253 253 253 253 253 253 253 253 253 253
57426-253 253 253 253 253 253 253 253 253 253 253 253
57427-253 253 253 253 253 253 144 144 144 2 2 6
57428- 2 2 6 2 2 6 2 2 6 46 46 46
57429- 2 2 6 2 2 6 2 2 6 2 2 6
57430- 42 42 42 74 74 74 30 30 30 10 10 10
57431- 0 0 0 0 0 0 0 0 0 0 0 0
57432- 0 0 0 0 0 0 0 0 0 0 0 0
57433- 0 0 0 0 0 0 0 0 0 0 0 0
57434- 0 0 0 0 0 0 0 0 0 0 0 0
57435- 0 0 0 0 0 0 0 0 0 0 0 0
57436- 0 0 0 0 0 0 0 0 0 0 0 0
57437- 0 0 0 0 0 0 0 0 0 0 0 0
57438- 6 6 6 14 14 14 42 42 42 90 90 90
57439- 26 26 26 6 6 6 42 42 42 2 2 6
57440- 74 74 74 250 250 250 253 253 253 253 253 253
57441-253 253 253 253 253 253 253 253 253 253 253 253
57442-253 253 253 253 253 253 242 242 242 242 242 242
57443-253 253 253 253 253 253 253 253 253 253 253 253
57444-253 253 253 253 253 253 253 253 253 253 253 253
57445-253 253 253 253 253 253 253 253 253 253 253 253
57446-253 253 253 253 253 253 253 253 253 253 253 253
57447-253 253 253 253 253 253 182 182 182 2 2 6
57448- 2 2 6 2 2 6 2 2 6 46 46 46
57449- 2 2 6 2 2 6 2 2 6 2 2 6
57450- 10 10 10 86 86 86 38 38 38 10 10 10
57451- 0 0 0 0 0 0 0 0 0 0 0 0
57452- 0 0 0 0 0 0 0 0 0 0 0 0
57453- 0 0 0 0 0 0 0 0 0 0 0 0
57454- 0 0 0 0 0 0 0 0 0 0 0 0
57455- 0 0 0 0 0 0 0 0 0 0 0 0
57456- 0 0 0 0 0 0 0 0 0 0 0 0
57457- 0 0 0 0 0 0 0 0 0 0 0 0
57458- 10 10 10 26 26 26 66 66 66 82 82 82
57459- 2 2 6 22 22 22 18 18 18 2 2 6
57460-149 149 149 253 253 253 253 253 253 253 253 253
57461-253 253 253 253 253 253 253 253 253 253 253 253
57462-253 253 253 253 253 253 234 234 234 242 242 242
57463-253 253 253 253 253 253 253 253 253 253 253 253
57464-253 253 253 253 253 253 253 253 253 253 253 253
57465-253 253 253 253 253 253 253 253 253 253 253 253
57466-253 253 253 253 253 253 253 253 253 253 253 253
57467-253 253 253 253 253 253 206 206 206 2 2 6
57468- 2 2 6 2 2 6 2 2 6 38 38 38
57469- 2 2 6 2 2 6 2 2 6 2 2 6
57470- 6 6 6 86 86 86 46 46 46 14 14 14
57471- 0 0 0 0 0 0 0 0 0 0 0 0
57472- 0 0 0 0 0 0 0 0 0 0 0 0
57473- 0 0 0 0 0 0 0 0 0 0 0 0
57474- 0 0 0 0 0 0 0 0 0 0 0 0
57475- 0 0 0 0 0 0 0 0 0 0 0 0
57476- 0 0 0 0 0 0 0 0 0 0 0 0
57477- 0 0 0 0 0 0 0 0 0 6 6 6
57478- 18 18 18 46 46 46 86 86 86 18 18 18
57479- 2 2 6 34 34 34 10 10 10 6 6 6
57480-210 210 210 253 253 253 253 253 253 253 253 253
57481-253 253 253 253 253 253 253 253 253 253 253 253
57482-253 253 253 253 253 253 234 234 234 242 242 242
57483-253 253 253 253 253 253 253 253 253 253 253 253
57484-253 253 253 253 253 253 253 253 253 253 253 253
57485-253 253 253 253 253 253 253 253 253 253 253 253
57486-253 253 253 253 253 253 253 253 253 253 253 253
57487-253 253 253 253 253 253 221 221 221 6 6 6
57488- 2 2 6 2 2 6 6 6 6 30 30 30
57489- 2 2 6 2 2 6 2 2 6 2 2 6
57490- 2 2 6 82 82 82 54 54 54 18 18 18
57491- 6 6 6 0 0 0 0 0 0 0 0 0
57492- 0 0 0 0 0 0 0 0 0 0 0 0
57493- 0 0 0 0 0 0 0 0 0 0 0 0
57494- 0 0 0 0 0 0 0 0 0 0 0 0
57495- 0 0 0 0 0 0 0 0 0 0 0 0
57496- 0 0 0 0 0 0 0 0 0 0 0 0
57497- 0 0 0 0 0 0 0 0 0 10 10 10
57498- 26 26 26 66 66 66 62 62 62 2 2 6
57499- 2 2 6 38 38 38 10 10 10 26 26 26
57500-238 238 238 253 253 253 253 253 253 253 253 253
57501-253 253 253 253 253 253 253 253 253 253 253 253
57502-253 253 253 253 253 253 231 231 231 238 238 238
57503-253 253 253 253 253 253 253 253 253 253 253 253
57504-253 253 253 253 253 253 253 253 253 253 253 253
57505-253 253 253 253 253 253 253 253 253 253 253 253
57506-253 253 253 253 253 253 253 253 253 253 253 253
57507-253 253 253 253 253 253 231 231 231 6 6 6
57508- 2 2 6 2 2 6 10 10 10 30 30 30
57509- 2 2 6 2 2 6 2 2 6 2 2 6
57510- 2 2 6 66 66 66 58 58 58 22 22 22
57511- 6 6 6 0 0 0 0 0 0 0 0 0
57512- 0 0 0 0 0 0 0 0 0 0 0 0
57513- 0 0 0 0 0 0 0 0 0 0 0 0
57514- 0 0 0 0 0 0 0 0 0 0 0 0
57515- 0 0 0 0 0 0 0 0 0 0 0 0
57516- 0 0 0 0 0 0 0 0 0 0 0 0
57517- 0 0 0 0 0 0 0 0 0 10 10 10
57518- 38 38 38 78 78 78 6 6 6 2 2 6
57519- 2 2 6 46 46 46 14 14 14 42 42 42
57520-246 246 246 253 253 253 253 253 253 253 253 253
57521-253 253 253 253 253 253 253 253 253 253 253 253
57522-253 253 253 253 253 253 231 231 231 242 242 242
57523-253 253 253 253 253 253 253 253 253 253 253 253
57524-253 253 253 253 253 253 253 253 253 253 253 253
57525-253 253 253 253 253 253 253 253 253 253 253 253
57526-253 253 253 253 253 253 253 253 253 253 253 253
57527-253 253 253 253 253 253 234 234 234 10 10 10
57528- 2 2 6 2 2 6 22 22 22 14 14 14
57529- 2 2 6 2 2 6 2 2 6 2 2 6
57530- 2 2 6 66 66 66 62 62 62 22 22 22
57531- 6 6 6 0 0 0 0 0 0 0 0 0
57532- 0 0 0 0 0 0 0 0 0 0 0 0
57533- 0 0 0 0 0 0 0 0 0 0 0 0
57534- 0 0 0 0 0 0 0 0 0 0 0 0
57535- 0 0 0 0 0 0 0 0 0 0 0 0
57536- 0 0 0 0 0 0 0 0 0 0 0 0
57537- 0 0 0 0 0 0 6 6 6 18 18 18
57538- 50 50 50 74 74 74 2 2 6 2 2 6
57539- 14 14 14 70 70 70 34 34 34 62 62 62
57540-250 250 250 253 253 253 253 253 253 253 253 253
57541-253 253 253 253 253 253 253 253 253 253 253 253
57542-253 253 253 253 253 253 231 231 231 246 246 246
57543-253 253 253 253 253 253 253 253 253 253 253 253
57544-253 253 253 253 253 253 253 253 253 253 253 253
57545-253 253 253 253 253 253 253 253 253 253 253 253
57546-253 253 253 253 253 253 253 253 253 253 253 253
57547-253 253 253 253 253 253 234 234 234 14 14 14
57548- 2 2 6 2 2 6 30 30 30 2 2 6
57549- 2 2 6 2 2 6 2 2 6 2 2 6
57550- 2 2 6 66 66 66 62 62 62 22 22 22
57551- 6 6 6 0 0 0 0 0 0 0 0 0
57552- 0 0 0 0 0 0 0 0 0 0 0 0
57553- 0 0 0 0 0 0 0 0 0 0 0 0
57554- 0 0 0 0 0 0 0 0 0 0 0 0
57555- 0 0 0 0 0 0 0 0 0 0 0 0
57556- 0 0 0 0 0 0 0 0 0 0 0 0
57557- 0 0 0 0 0 0 6 6 6 18 18 18
57558- 54 54 54 62 62 62 2 2 6 2 2 6
57559- 2 2 6 30 30 30 46 46 46 70 70 70
57560-250 250 250 253 253 253 253 253 253 253 253 253
57561-253 253 253 253 253 253 253 253 253 253 253 253
57562-253 253 253 253 253 253 231 231 231 246 246 246
57563-253 253 253 253 253 253 253 253 253 253 253 253
57564-253 253 253 253 253 253 253 253 253 253 253 253
57565-253 253 253 253 253 253 253 253 253 253 253 253
57566-253 253 253 253 253 253 253 253 253 253 253 253
57567-253 253 253 253 253 253 226 226 226 10 10 10
57568- 2 2 6 6 6 6 30 30 30 2 2 6
57569- 2 2 6 2 2 6 2 2 6 2 2 6
57570- 2 2 6 66 66 66 58 58 58 22 22 22
57571- 6 6 6 0 0 0 0 0 0 0 0 0
57572- 0 0 0 0 0 0 0 0 0 0 0 0
57573- 0 0 0 0 0 0 0 0 0 0 0 0
57574- 0 0 0 0 0 0 0 0 0 0 0 0
57575- 0 0 0 0 0 0 0 0 0 0 0 0
57576- 0 0 0 0 0 0 0 0 0 0 0 0
57577- 0 0 0 0 0 0 6 6 6 22 22 22
57578- 58 58 58 62 62 62 2 2 6 2 2 6
57579- 2 2 6 2 2 6 30 30 30 78 78 78
57580-250 250 250 253 253 253 253 253 253 253 253 253
57581-253 253 253 253 253 253 253 253 253 253 253 253
57582-253 253 253 253 253 253 231 231 231 246 246 246
57583-253 253 253 253 253 253 253 253 253 253 253 253
57584-253 253 253 253 253 253 253 253 253 253 253 253
57585-253 253 253 253 253 253 253 253 253 253 253 253
57586-253 253 253 253 253 253 253 253 253 253 253 253
57587-253 253 253 253 253 253 206 206 206 2 2 6
57588- 22 22 22 34 34 34 18 14 6 22 22 22
57589- 26 26 26 18 18 18 6 6 6 2 2 6
57590- 2 2 6 82 82 82 54 54 54 18 18 18
57591- 6 6 6 0 0 0 0 0 0 0 0 0
57592- 0 0 0 0 0 0 0 0 0 0 0 0
57593- 0 0 0 0 0 0 0 0 0 0 0 0
57594- 0 0 0 0 0 0 0 0 0 0 0 0
57595- 0 0 0 0 0 0 0 0 0 0 0 0
57596- 0 0 0 0 0 0 0 0 0 0 0 0
57597- 0 0 0 0 0 0 6 6 6 26 26 26
57598- 62 62 62 106 106 106 74 54 14 185 133 11
57599-210 162 10 121 92 8 6 6 6 62 62 62
57600-238 238 238 253 253 253 253 253 253 253 253 253
57601-253 253 253 253 253 253 253 253 253 253 253 253
57602-253 253 253 253 253 253 231 231 231 246 246 246
57603-253 253 253 253 253 253 253 253 253 253 253 253
57604-253 253 253 253 253 253 253 253 253 253 253 253
57605-253 253 253 253 253 253 253 253 253 253 253 253
57606-253 253 253 253 253 253 253 253 253 253 253 253
57607-253 253 253 253 253 253 158 158 158 18 18 18
57608- 14 14 14 2 2 6 2 2 6 2 2 6
57609- 6 6 6 18 18 18 66 66 66 38 38 38
57610- 6 6 6 94 94 94 50 50 50 18 18 18
57611- 6 6 6 0 0 0 0 0 0 0 0 0
57612- 0 0 0 0 0 0 0 0 0 0 0 0
57613- 0 0 0 0 0 0 0 0 0 0 0 0
57614- 0 0 0 0 0 0 0 0 0 0 0 0
57615- 0 0 0 0 0 0 0 0 0 0 0 0
57616- 0 0 0 0 0 0 0 0 0 6 6 6
57617- 10 10 10 10 10 10 18 18 18 38 38 38
57618- 78 78 78 142 134 106 216 158 10 242 186 14
57619-246 190 14 246 190 14 156 118 10 10 10 10
57620- 90 90 90 238 238 238 253 253 253 253 253 253
57621-253 253 253 253 253 253 253 253 253 253 253 253
57622-253 253 253 253 253 253 231 231 231 250 250 250
57623-253 253 253 253 253 253 253 253 253 253 253 253
57624-253 253 253 253 253 253 253 253 253 253 253 253
57625-253 253 253 253 253 253 253 253 253 253 253 253
57626-253 253 253 253 253 253 253 253 253 246 230 190
57627-238 204 91 238 204 91 181 142 44 37 26 9
57628- 2 2 6 2 2 6 2 2 6 2 2 6
57629- 2 2 6 2 2 6 38 38 38 46 46 46
57630- 26 26 26 106 106 106 54 54 54 18 18 18
57631- 6 6 6 0 0 0 0 0 0 0 0 0
57632- 0 0 0 0 0 0 0 0 0 0 0 0
57633- 0 0 0 0 0 0 0 0 0 0 0 0
57634- 0 0 0 0 0 0 0 0 0 0 0 0
57635- 0 0 0 0 0 0 0 0 0 0 0 0
57636- 0 0 0 6 6 6 14 14 14 22 22 22
57637- 30 30 30 38 38 38 50 50 50 70 70 70
57638-106 106 106 190 142 34 226 170 11 242 186 14
57639-246 190 14 246 190 14 246 190 14 154 114 10
57640- 6 6 6 74 74 74 226 226 226 253 253 253
57641-253 253 253 253 253 253 253 253 253 253 253 253
57642-253 253 253 253 253 253 231 231 231 250 250 250
57643-253 253 253 253 253 253 253 253 253 253 253 253
57644-253 253 253 253 253 253 253 253 253 253 253 253
57645-253 253 253 253 253 253 253 253 253 253 253 253
57646-253 253 253 253 253 253 253 253 253 228 184 62
57647-241 196 14 241 208 19 232 195 16 38 30 10
57648- 2 2 6 2 2 6 2 2 6 2 2 6
57649- 2 2 6 6 6 6 30 30 30 26 26 26
57650-203 166 17 154 142 90 66 66 66 26 26 26
57651- 6 6 6 0 0 0 0 0 0 0 0 0
57652- 0 0 0 0 0 0 0 0 0 0 0 0
57653- 0 0 0 0 0 0 0 0 0 0 0 0
57654- 0 0 0 0 0 0 0 0 0 0 0 0
57655- 0 0 0 0 0 0 0 0 0 0 0 0
57656- 6 6 6 18 18 18 38 38 38 58 58 58
57657- 78 78 78 86 86 86 101 101 101 123 123 123
57658-175 146 61 210 150 10 234 174 13 246 186 14
57659-246 190 14 246 190 14 246 190 14 238 190 10
57660-102 78 10 2 2 6 46 46 46 198 198 198
57661-253 253 253 253 253 253 253 253 253 253 253 253
57662-253 253 253 253 253 253 234 234 234 242 242 242
57663-253 253 253 253 253 253 253 253 253 253 253 253
57664-253 253 253 253 253 253 253 253 253 253 253 253
57665-253 253 253 253 253 253 253 253 253 253 253 253
57666-253 253 253 253 253 253 253 253 253 224 178 62
57667-242 186 14 241 196 14 210 166 10 22 18 6
57668- 2 2 6 2 2 6 2 2 6 2 2 6
57669- 2 2 6 2 2 6 6 6 6 121 92 8
57670-238 202 15 232 195 16 82 82 82 34 34 34
57671- 10 10 10 0 0 0 0 0 0 0 0 0
57672- 0 0 0 0 0 0 0 0 0 0 0 0
57673- 0 0 0 0 0 0 0 0 0 0 0 0
57674- 0 0 0 0 0 0 0 0 0 0 0 0
57675- 0 0 0 0 0 0 0 0 0 0 0 0
57676- 14 14 14 38 38 38 70 70 70 154 122 46
57677-190 142 34 200 144 11 197 138 11 197 138 11
57678-213 154 11 226 170 11 242 186 14 246 190 14
57679-246 190 14 246 190 14 246 190 14 246 190 14
57680-225 175 15 46 32 6 2 2 6 22 22 22
57681-158 158 158 250 250 250 253 253 253 253 253 253
57682-253 253 253 253 253 253 253 253 253 253 253 253
57683-253 253 253 253 253 253 253 253 253 253 253 253
57684-253 253 253 253 253 253 253 253 253 253 253 253
57685-253 253 253 253 253 253 253 253 253 253 253 253
57686-253 253 253 250 250 250 242 242 242 224 178 62
57687-239 182 13 236 186 11 213 154 11 46 32 6
57688- 2 2 6 2 2 6 2 2 6 2 2 6
57689- 2 2 6 2 2 6 61 42 6 225 175 15
57690-238 190 10 236 186 11 112 100 78 42 42 42
57691- 14 14 14 0 0 0 0 0 0 0 0 0
57692- 0 0 0 0 0 0 0 0 0 0 0 0
57693- 0 0 0 0 0 0 0 0 0 0 0 0
57694- 0 0 0 0 0 0 0 0 0 0 0 0
57695- 0 0 0 0 0 0 0 0 0 6 6 6
57696- 22 22 22 54 54 54 154 122 46 213 154 11
57697-226 170 11 230 174 11 226 170 11 226 170 11
57698-236 178 12 242 186 14 246 190 14 246 190 14
57699-246 190 14 246 190 14 246 190 14 246 190 14
57700-241 196 14 184 144 12 10 10 10 2 2 6
57701- 6 6 6 116 116 116 242 242 242 253 253 253
57702-253 253 253 253 253 253 253 253 253 253 253 253
57703-253 253 253 253 253 253 253 253 253 253 253 253
57704-253 253 253 253 253 253 253 253 253 253 253 253
57705-253 253 253 253 253 253 253 253 253 253 253 253
57706-253 253 253 231 231 231 198 198 198 214 170 54
57707-236 178 12 236 178 12 210 150 10 137 92 6
57708- 18 14 6 2 2 6 2 2 6 2 2 6
57709- 6 6 6 70 47 6 200 144 11 236 178 12
57710-239 182 13 239 182 13 124 112 88 58 58 58
57711- 22 22 22 6 6 6 0 0 0 0 0 0
57712- 0 0 0 0 0 0 0 0 0 0 0 0
57713- 0 0 0 0 0 0 0 0 0 0 0 0
57714- 0 0 0 0 0 0 0 0 0 0 0 0
57715- 0 0 0 0 0 0 0 0 0 10 10 10
57716- 30 30 30 70 70 70 180 133 36 226 170 11
57717-239 182 13 242 186 14 242 186 14 246 186 14
57718-246 190 14 246 190 14 246 190 14 246 190 14
57719-246 190 14 246 190 14 246 190 14 246 190 14
57720-246 190 14 232 195 16 98 70 6 2 2 6
57721- 2 2 6 2 2 6 66 66 66 221 221 221
57722-253 253 253 253 253 253 253 253 253 253 253 253
57723-253 253 253 253 253 253 253 253 253 253 253 253
57724-253 253 253 253 253 253 253 253 253 253 253 253
57725-253 253 253 253 253 253 253 253 253 253 253 253
57726-253 253 253 206 206 206 198 198 198 214 166 58
57727-230 174 11 230 174 11 216 158 10 192 133 9
57728-163 110 8 116 81 8 102 78 10 116 81 8
57729-167 114 7 197 138 11 226 170 11 239 182 13
57730-242 186 14 242 186 14 162 146 94 78 78 78
57731- 34 34 34 14 14 14 6 6 6 0 0 0
57732- 0 0 0 0 0 0 0 0 0 0 0 0
57733- 0 0 0 0 0 0 0 0 0 0 0 0
57734- 0 0 0 0 0 0 0 0 0 0 0 0
57735- 0 0 0 0 0 0 0 0 0 6 6 6
57736- 30 30 30 78 78 78 190 142 34 226 170 11
57737-239 182 13 246 190 14 246 190 14 246 190 14
57738-246 190 14 246 190 14 246 190 14 246 190 14
57739-246 190 14 246 190 14 246 190 14 246 190 14
57740-246 190 14 241 196 14 203 166 17 22 18 6
57741- 2 2 6 2 2 6 2 2 6 38 38 38
57742-218 218 218 253 253 253 253 253 253 253 253 253
57743-253 253 253 253 253 253 253 253 253 253 253 253
57744-253 253 253 253 253 253 253 253 253 253 253 253
57745-253 253 253 253 253 253 253 253 253 253 253 253
57746-250 250 250 206 206 206 198 198 198 202 162 69
57747-226 170 11 236 178 12 224 166 10 210 150 10
57748-200 144 11 197 138 11 192 133 9 197 138 11
57749-210 150 10 226 170 11 242 186 14 246 190 14
57750-246 190 14 246 186 14 225 175 15 124 112 88
57751- 62 62 62 30 30 30 14 14 14 6 6 6
57752- 0 0 0 0 0 0 0 0 0 0 0 0
57753- 0 0 0 0 0 0 0 0 0 0 0 0
57754- 0 0 0 0 0 0 0 0 0 0 0 0
57755- 0 0 0 0 0 0 0 0 0 10 10 10
57756- 30 30 30 78 78 78 174 135 50 224 166 10
57757-239 182 13 246 190 14 246 190 14 246 190 14
57758-246 190 14 246 190 14 246 190 14 246 190 14
57759-246 190 14 246 190 14 246 190 14 246 190 14
57760-246 190 14 246 190 14 241 196 14 139 102 15
57761- 2 2 6 2 2 6 2 2 6 2 2 6
57762- 78 78 78 250 250 250 253 253 253 253 253 253
57763-253 253 253 253 253 253 253 253 253 253 253 253
57764-253 253 253 253 253 253 253 253 253 253 253 253
57765-253 253 253 253 253 253 253 253 253 253 253 253
57766-250 250 250 214 214 214 198 198 198 190 150 46
57767-219 162 10 236 178 12 234 174 13 224 166 10
57768-216 158 10 213 154 11 213 154 11 216 158 10
57769-226 170 11 239 182 13 246 190 14 246 190 14
57770-246 190 14 246 190 14 242 186 14 206 162 42
57771-101 101 101 58 58 58 30 30 30 14 14 14
57772- 6 6 6 0 0 0 0 0 0 0 0 0
57773- 0 0 0 0 0 0 0 0 0 0 0 0
57774- 0 0 0 0 0 0 0 0 0 0 0 0
57775- 0 0 0 0 0 0 0 0 0 10 10 10
57776- 30 30 30 74 74 74 174 135 50 216 158 10
57777-236 178 12 246 190 14 246 190 14 246 190 14
57778-246 190 14 246 190 14 246 190 14 246 190 14
57779-246 190 14 246 190 14 246 190 14 246 190 14
57780-246 190 14 246 190 14 241 196 14 226 184 13
57781- 61 42 6 2 2 6 2 2 6 2 2 6
57782- 22 22 22 238 238 238 253 253 253 253 253 253
57783-253 253 253 253 253 253 253 253 253 253 253 253
57784-253 253 253 253 253 253 253 253 253 253 253 253
57785-253 253 253 253 253 253 253 253 253 253 253 253
57786-253 253 253 226 226 226 187 187 187 180 133 36
57787-216 158 10 236 178 12 239 182 13 236 178 12
57788-230 174 11 226 170 11 226 170 11 230 174 11
57789-236 178 12 242 186 14 246 190 14 246 190 14
57790-246 190 14 246 190 14 246 186 14 239 182 13
57791-206 162 42 106 106 106 66 66 66 34 34 34
57792- 14 14 14 6 6 6 0 0 0 0 0 0
57793- 0 0 0 0 0 0 0 0 0 0 0 0
57794- 0 0 0 0 0 0 0 0 0 0 0 0
57795- 0 0 0 0 0 0 0 0 0 6 6 6
57796- 26 26 26 70 70 70 163 133 67 213 154 11
57797-236 178 12 246 190 14 246 190 14 246 190 14
57798-246 190 14 246 190 14 246 190 14 246 190 14
57799-246 190 14 246 190 14 246 190 14 246 190 14
57800-246 190 14 246 190 14 246 190 14 241 196 14
57801-190 146 13 18 14 6 2 2 6 2 2 6
57802- 46 46 46 246 246 246 253 253 253 253 253 253
57803-253 253 253 253 253 253 253 253 253 253 253 253
57804-253 253 253 253 253 253 253 253 253 253 253 253
57805-253 253 253 253 253 253 253 253 253 253 253 253
57806-253 253 253 221 221 221 86 86 86 156 107 11
57807-216 158 10 236 178 12 242 186 14 246 186 14
57808-242 186 14 239 182 13 239 182 13 242 186 14
57809-242 186 14 246 186 14 246 190 14 246 190 14
57810-246 190 14 246 190 14 246 190 14 246 190 14
57811-242 186 14 225 175 15 142 122 72 66 66 66
57812- 30 30 30 10 10 10 0 0 0 0 0 0
57813- 0 0 0 0 0 0 0 0 0 0 0 0
57814- 0 0 0 0 0 0 0 0 0 0 0 0
57815- 0 0 0 0 0 0 0 0 0 6 6 6
57816- 26 26 26 70 70 70 163 133 67 210 150 10
57817-236 178 12 246 190 14 246 190 14 246 190 14
57818-246 190 14 246 190 14 246 190 14 246 190 14
57819-246 190 14 246 190 14 246 190 14 246 190 14
57820-246 190 14 246 190 14 246 190 14 246 190 14
57821-232 195 16 121 92 8 34 34 34 106 106 106
57822-221 221 221 253 253 253 253 253 253 253 253 253
57823-253 253 253 253 253 253 253 253 253 253 253 253
57824-253 253 253 253 253 253 253 253 253 253 253 253
57825-253 253 253 253 253 253 253 253 253 253 253 253
57826-242 242 242 82 82 82 18 14 6 163 110 8
57827-216 158 10 236 178 12 242 186 14 246 190 14
57828-246 190 14 246 190 14 246 190 14 246 190 14
57829-246 190 14 246 190 14 246 190 14 246 190 14
57830-246 190 14 246 190 14 246 190 14 246 190 14
57831-246 190 14 246 190 14 242 186 14 163 133 67
57832- 46 46 46 18 18 18 6 6 6 0 0 0
57833- 0 0 0 0 0 0 0 0 0 0 0 0
57834- 0 0 0 0 0 0 0 0 0 0 0 0
57835- 0 0 0 0 0 0 0 0 0 10 10 10
57836- 30 30 30 78 78 78 163 133 67 210 150 10
57837-236 178 12 246 186 14 246 190 14 246 190 14
57838-246 190 14 246 190 14 246 190 14 246 190 14
57839-246 190 14 246 190 14 246 190 14 246 190 14
57840-246 190 14 246 190 14 246 190 14 246 190 14
57841-241 196 14 215 174 15 190 178 144 253 253 253
57842-253 253 253 253 253 253 253 253 253 253 253 253
57843-253 253 253 253 253 253 253 253 253 253 253 253
57844-253 253 253 253 253 253 253 253 253 253 253 253
57845-253 253 253 253 253 253 253 253 253 218 218 218
57846- 58 58 58 2 2 6 22 18 6 167 114 7
57847-216 158 10 236 178 12 246 186 14 246 190 14
57848-246 190 14 246 190 14 246 190 14 246 190 14
57849-246 190 14 246 190 14 246 190 14 246 190 14
57850-246 190 14 246 190 14 246 190 14 246 190 14
57851-246 190 14 246 186 14 242 186 14 190 150 46
57852- 54 54 54 22 22 22 6 6 6 0 0 0
57853- 0 0 0 0 0 0 0 0 0 0 0 0
57854- 0 0 0 0 0 0 0 0 0 0 0 0
57855- 0 0 0 0 0 0 0 0 0 14 14 14
57856- 38 38 38 86 86 86 180 133 36 213 154 11
57857-236 178 12 246 186 14 246 190 14 246 190 14
57858-246 190 14 246 190 14 246 190 14 246 190 14
57859-246 190 14 246 190 14 246 190 14 246 190 14
57860-246 190 14 246 190 14 246 190 14 246 190 14
57861-246 190 14 232 195 16 190 146 13 214 214 214
57862-253 253 253 253 253 253 253 253 253 253 253 253
57863-253 253 253 253 253 253 253 253 253 253 253 253
57864-253 253 253 253 253 253 253 253 253 253 253 253
57865-253 253 253 250 250 250 170 170 170 26 26 26
57866- 2 2 6 2 2 6 37 26 9 163 110 8
57867-219 162 10 239 182 13 246 186 14 246 190 14
57868-246 190 14 246 190 14 246 190 14 246 190 14
57869-246 190 14 246 190 14 246 190 14 246 190 14
57870-246 190 14 246 190 14 246 190 14 246 190 14
57871-246 186 14 236 178 12 224 166 10 142 122 72
57872- 46 46 46 18 18 18 6 6 6 0 0 0
57873- 0 0 0 0 0 0 0 0 0 0 0 0
57874- 0 0 0 0 0 0 0 0 0 0 0 0
57875- 0 0 0 0 0 0 6 6 6 18 18 18
57876- 50 50 50 109 106 95 192 133 9 224 166 10
57877-242 186 14 246 190 14 246 190 14 246 190 14
57878-246 190 14 246 190 14 246 190 14 246 190 14
57879-246 190 14 246 190 14 246 190 14 246 190 14
57880-246 190 14 246 190 14 246 190 14 246 190 14
57881-242 186 14 226 184 13 210 162 10 142 110 46
57882-226 226 226 253 253 253 253 253 253 253 253 253
57883-253 253 253 253 253 253 253 253 253 253 253 253
57884-253 253 253 253 253 253 253 253 253 253 253 253
57885-198 198 198 66 66 66 2 2 6 2 2 6
57886- 2 2 6 2 2 6 50 34 6 156 107 11
57887-219 162 10 239 182 13 246 186 14 246 190 14
57888-246 190 14 246 190 14 246 190 14 246 190 14
57889-246 190 14 246 190 14 246 190 14 246 190 14
57890-246 190 14 246 190 14 246 190 14 242 186 14
57891-234 174 13 213 154 11 154 122 46 66 66 66
57892- 30 30 30 10 10 10 0 0 0 0 0 0
57893- 0 0 0 0 0 0 0 0 0 0 0 0
57894- 0 0 0 0 0 0 0 0 0 0 0 0
57895- 0 0 0 0 0 0 6 6 6 22 22 22
57896- 58 58 58 154 121 60 206 145 10 234 174 13
57897-242 186 14 246 186 14 246 190 14 246 190 14
57898-246 190 14 246 190 14 246 190 14 246 190 14
57899-246 190 14 246 190 14 246 190 14 246 190 14
57900-246 190 14 246 190 14 246 190 14 246 190 14
57901-246 186 14 236 178 12 210 162 10 163 110 8
57902- 61 42 6 138 138 138 218 218 218 250 250 250
57903-253 253 253 253 253 253 253 253 253 250 250 250
57904-242 242 242 210 210 210 144 144 144 66 66 66
57905- 6 6 6 2 2 6 2 2 6 2 2 6
57906- 2 2 6 2 2 6 61 42 6 163 110 8
57907-216 158 10 236 178 12 246 190 14 246 190 14
57908-246 190 14 246 190 14 246 190 14 246 190 14
57909-246 190 14 246 190 14 246 190 14 246 190 14
57910-246 190 14 239 182 13 230 174 11 216 158 10
57911-190 142 34 124 112 88 70 70 70 38 38 38
57912- 18 18 18 6 6 6 0 0 0 0 0 0
57913- 0 0 0 0 0 0 0 0 0 0 0 0
57914- 0 0 0 0 0 0 0 0 0 0 0 0
57915- 0 0 0 0 0 0 6 6 6 22 22 22
57916- 62 62 62 168 124 44 206 145 10 224 166 10
57917-236 178 12 239 182 13 242 186 14 242 186 14
57918-246 186 14 246 190 14 246 190 14 246 190 14
57919-246 190 14 246 190 14 246 190 14 246 190 14
57920-246 190 14 246 190 14 246 190 14 246 190 14
57921-246 190 14 236 178 12 216 158 10 175 118 6
57922- 80 54 7 2 2 6 6 6 6 30 30 30
57923- 54 54 54 62 62 62 50 50 50 38 38 38
57924- 14 14 14 2 2 6 2 2 6 2 2 6
57925- 2 2 6 2 2 6 2 2 6 2 2 6
57926- 2 2 6 6 6 6 80 54 7 167 114 7
57927-213 154 11 236 178 12 246 190 14 246 190 14
57928-246 190 14 246 190 14 246 190 14 246 190 14
57929-246 190 14 242 186 14 239 182 13 239 182 13
57930-230 174 11 210 150 10 174 135 50 124 112 88
57931- 82 82 82 54 54 54 34 34 34 18 18 18
57932- 6 6 6 0 0 0 0 0 0 0 0 0
57933- 0 0 0 0 0 0 0 0 0 0 0 0
57934- 0 0 0 0 0 0 0 0 0 0 0 0
57935- 0 0 0 0 0 0 6 6 6 18 18 18
57936- 50 50 50 158 118 36 192 133 9 200 144 11
57937-216 158 10 219 162 10 224 166 10 226 170 11
57938-230 174 11 236 178 12 239 182 13 239 182 13
57939-242 186 14 246 186 14 246 190 14 246 190 14
57940-246 190 14 246 190 14 246 190 14 246 190 14
57941-246 186 14 230 174 11 210 150 10 163 110 8
57942-104 69 6 10 10 10 2 2 6 2 2 6
57943- 2 2 6 2 2 6 2 2 6 2 2 6
57944- 2 2 6 2 2 6 2 2 6 2 2 6
57945- 2 2 6 2 2 6 2 2 6 2 2 6
57946- 2 2 6 6 6 6 91 60 6 167 114 7
57947-206 145 10 230 174 11 242 186 14 246 190 14
57948-246 190 14 246 190 14 246 186 14 242 186 14
57949-239 182 13 230 174 11 224 166 10 213 154 11
57950-180 133 36 124 112 88 86 86 86 58 58 58
57951- 38 38 38 22 22 22 10 10 10 6 6 6
57952- 0 0 0 0 0 0 0 0 0 0 0 0
57953- 0 0 0 0 0 0 0 0 0 0 0 0
57954- 0 0 0 0 0 0 0 0 0 0 0 0
57955- 0 0 0 0 0 0 0 0 0 14 14 14
57956- 34 34 34 70 70 70 138 110 50 158 118 36
57957-167 114 7 180 123 7 192 133 9 197 138 11
57958-200 144 11 206 145 10 213 154 11 219 162 10
57959-224 166 10 230 174 11 239 182 13 242 186 14
57960-246 186 14 246 186 14 246 186 14 246 186 14
57961-239 182 13 216 158 10 185 133 11 152 99 6
57962-104 69 6 18 14 6 2 2 6 2 2 6
57963- 2 2 6 2 2 6 2 2 6 2 2 6
57964- 2 2 6 2 2 6 2 2 6 2 2 6
57965- 2 2 6 2 2 6 2 2 6 2 2 6
57966- 2 2 6 6 6 6 80 54 7 152 99 6
57967-192 133 9 219 162 10 236 178 12 239 182 13
57968-246 186 14 242 186 14 239 182 13 236 178 12
57969-224 166 10 206 145 10 192 133 9 154 121 60
57970- 94 94 94 62 62 62 42 42 42 22 22 22
57971- 14 14 14 6 6 6 0 0 0 0 0 0
57972- 0 0 0 0 0 0 0 0 0 0 0 0
57973- 0 0 0 0 0 0 0 0 0 0 0 0
57974- 0 0 0 0 0 0 0 0 0 0 0 0
57975- 0 0 0 0 0 0 0 0 0 6 6 6
57976- 18 18 18 34 34 34 58 58 58 78 78 78
57977-101 98 89 124 112 88 142 110 46 156 107 11
57978-163 110 8 167 114 7 175 118 6 180 123 7
57979-185 133 11 197 138 11 210 150 10 219 162 10
57980-226 170 11 236 178 12 236 178 12 234 174 13
57981-219 162 10 197 138 11 163 110 8 130 83 6
57982- 91 60 6 10 10 10 2 2 6 2 2 6
57983- 18 18 18 38 38 38 38 38 38 38 38 38
57984- 38 38 38 38 38 38 38 38 38 38 38 38
57985- 38 38 38 38 38 38 26 26 26 2 2 6
57986- 2 2 6 6 6 6 70 47 6 137 92 6
57987-175 118 6 200 144 11 219 162 10 230 174 11
57988-234 174 13 230 174 11 219 162 10 210 150 10
57989-192 133 9 163 110 8 124 112 88 82 82 82
57990- 50 50 50 30 30 30 14 14 14 6 6 6
57991- 0 0 0 0 0 0 0 0 0 0 0 0
57992- 0 0 0 0 0 0 0 0 0 0 0 0
57993- 0 0 0 0 0 0 0 0 0 0 0 0
57994- 0 0 0 0 0 0 0 0 0 0 0 0
57995- 0 0 0 0 0 0 0 0 0 0 0 0
57996- 6 6 6 14 14 14 22 22 22 34 34 34
57997- 42 42 42 58 58 58 74 74 74 86 86 86
57998-101 98 89 122 102 70 130 98 46 121 87 25
57999-137 92 6 152 99 6 163 110 8 180 123 7
58000-185 133 11 197 138 11 206 145 10 200 144 11
58001-180 123 7 156 107 11 130 83 6 104 69 6
58002- 50 34 6 54 54 54 110 110 110 101 98 89
58003- 86 86 86 82 82 82 78 78 78 78 78 78
58004- 78 78 78 78 78 78 78 78 78 78 78 78
58005- 78 78 78 82 82 82 86 86 86 94 94 94
58006-106 106 106 101 101 101 86 66 34 124 80 6
58007-156 107 11 180 123 7 192 133 9 200 144 11
58008-206 145 10 200 144 11 192 133 9 175 118 6
58009-139 102 15 109 106 95 70 70 70 42 42 42
58010- 22 22 22 10 10 10 0 0 0 0 0 0
58011- 0 0 0 0 0 0 0 0 0 0 0 0
58012- 0 0 0 0 0 0 0 0 0 0 0 0
58013- 0 0 0 0 0 0 0 0 0 0 0 0
58014- 0 0 0 0 0 0 0 0 0 0 0 0
58015- 0 0 0 0 0 0 0 0 0 0 0 0
58016- 0 0 0 0 0 0 6 6 6 10 10 10
58017- 14 14 14 22 22 22 30 30 30 38 38 38
58018- 50 50 50 62 62 62 74 74 74 90 90 90
58019-101 98 89 112 100 78 121 87 25 124 80 6
58020-137 92 6 152 99 6 152 99 6 152 99 6
58021-138 86 6 124 80 6 98 70 6 86 66 30
58022-101 98 89 82 82 82 58 58 58 46 46 46
58023- 38 38 38 34 34 34 34 34 34 34 34 34
58024- 34 34 34 34 34 34 34 34 34 34 34 34
58025- 34 34 34 34 34 34 38 38 38 42 42 42
58026- 54 54 54 82 82 82 94 86 76 91 60 6
58027-134 86 6 156 107 11 167 114 7 175 118 6
58028-175 118 6 167 114 7 152 99 6 121 87 25
58029-101 98 89 62 62 62 34 34 34 18 18 18
58030- 6 6 6 0 0 0 0 0 0 0 0 0
58031- 0 0 0 0 0 0 0 0 0 0 0 0
58032- 0 0 0 0 0 0 0 0 0 0 0 0
58033- 0 0 0 0 0 0 0 0 0 0 0 0
58034- 0 0 0 0 0 0 0 0 0 0 0 0
58035- 0 0 0 0 0 0 0 0 0 0 0 0
58036- 0 0 0 0 0 0 0 0 0 0 0 0
58037- 0 0 0 6 6 6 6 6 6 10 10 10
58038- 18 18 18 22 22 22 30 30 30 42 42 42
58039- 50 50 50 66 66 66 86 86 86 101 98 89
58040-106 86 58 98 70 6 104 69 6 104 69 6
58041-104 69 6 91 60 6 82 62 34 90 90 90
58042- 62 62 62 38 38 38 22 22 22 14 14 14
58043- 10 10 10 10 10 10 10 10 10 10 10 10
58044- 10 10 10 10 10 10 6 6 6 10 10 10
58045- 10 10 10 10 10 10 10 10 10 14 14 14
58046- 22 22 22 42 42 42 70 70 70 89 81 66
58047- 80 54 7 104 69 6 124 80 6 137 92 6
58048-134 86 6 116 81 8 100 82 52 86 86 86
58049- 58 58 58 30 30 30 14 14 14 6 6 6
58050- 0 0 0 0 0 0 0 0 0 0 0 0
58051- 0 0 0 0 0 0 0 0 0 0 0 0
58052- 0 0 0 0 0 0 0 0 0 0 0 0
58053- 0 0 0 0 0 0 0 0 0 0 0 0
58054- 0 0 0 0 0 0 0 0 0 0 0 0
58055- 0 0 0 0 0 0 0 0 0 0 0 0
58056- 0 0 0 0 0 0 0 0 0 0 0 0
58057- 0 0 0 0 0 0 0 0 0 0 0 0
58058- 0 0 0 6 6 6 10 10 10 14 14 14
58059- 18 18 18 26 26 26 38 38 38 54 54 54
58060- 70 70 70 86 86 86 94 86 76 89 81 66
58061- 89 81 66 86 86 86 74 74 74 50 50 50
58062- 30 30 30 14 14 14 6 6 6 0 0 0
58063- 0 0 0 0 0 0 0 0 0 0 0 0
58064- 0 0 0 0 0 0 0 0 0 0 0 0
58065- 0 0 0 0 0 0 0 0 0 0 0 0
58066- 6 6 6 18 18 18 34 34 34 58 58 58
58067- 82 82 82 89 81 66 89 81 66 89 81 66
58068- 94 86 66 94 86 76 74 74 74 50 50 50
58069- 26 26 26 14 14 14 6 6 6 0 0 0
58070- 0 0 0 0 0 0 0 0 0 0 0 0
58071- 0 0 0 0 0 0 0 0 0 0 0 0
58072- 0 0 0 0 0 0 0 0 0 0 0 0
58073- 0 0 0 0 0 0 0 0 0 0 0 0
58074- 0 0 0 0 0 0 0 0 0 0 0 0
58075- 0 0 0 0 0 0 0 0 0 0 0 0
58076- 0 0 0 0 0 0 0 0 0 0 0 0
58077- 0 0 0 0 0 0 0 0 0 0 0 0
58078- 0 0 0 0 0 0 0 0 0 0 0 0
58079- 6 6 6 6 6 6 14 14 14 18 18 18
58080- 30 30 30 38 38 38 46 46 46 54 54 54
58081- 50 50 50 42 42 42 30 30 30 18 18 18
58082- 10 10 10 0 0 0 0 0 0 0 0 0
58083- 0 0 0 0 0 0 0 0 0 0 0 0
58084- 0 0 0 0 0 0 0 0 0 0 0 0
58085- 0 0 0 0 0 0 0 0 0 0 0 0
58086- 0 0 0 6 6 6 14 14 14 26 26 26
58087- 38 38 38 50 50 50 58 58 58 58 58 58
58088- 54 54 54 42 42 42 30 30 30 18 18 18
58089- 10 10 10 0 0 0 0 0 0 0 0 0
58090- 0 0 0 0 0 0 0 0 0 0 0 0
58091- 0 0 0 0 0 0 0 0 0 0 0 0
58092- 0 0 0 0 0 0 0 0 0 0 0 0
58093- 0 0 0 0 0 0 0 0 0 0 0 0
58094- 0 0 0 0 0 0 0 0 0 0 0 0
58095- 0 0 0 0 0 0 0 0 0 0 0 0
58096- 0 0 0 0 0 0 0 0 0 0 0 0
58097- 0 0 0 0 0 0 0 0 0 0 0 0
58098- 0 0 0 0 0 0 0 0 0 0 0 0
58099- 0 0 0 0 0 0 0 0 0 6 6 6
58100- 6 6 6 10 10 10 14 14 14 18 18 18
58101- 18 18 18 14 14 14 10 10 10 6 6 6
58102- 0 0 0 0 0 0 0 0 0 0 0 0
58103- 0 0 0 0 0 0 0 0 0 0 0 0
58104- 0 0 0 0 0 0 0 0 0 0 0 0
58105- 0 0 0 0 0 0 0 0 0 0 0 0
58106- 0 0 0 0 0 0 0 0 0 6 6 6
58107- 14 14 14 18 18 18 22 22 22 22 22 22
58108- 18 18 18 14 14 14 10 10 10 6 6 6
58109- 0 0 0 0 0 0 0 0 0 0 0 0
58110- 0 0 0 0 0 0 0 0 0 0 0 0
58111- 0 0 0 0 0 0 0 0 0 0 0 0
58112- 0 0 0 0 0 0 0 0 0 0 0 0
58113- 0 0 0 0 0 0 0 0 0 0 0 0
58114+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58115+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58116+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58117+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58118+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58119+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58120+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58121+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58122+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58123+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58124+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58125+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58126+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58127+4 4 4 4 4 4
58128+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58129+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58130+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58131+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58132+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58133+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58134+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58135+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58136+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58137+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58138+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58139+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58140+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58141+4 4 4 4 4 4
58142+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58143+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58144+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58145+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58146+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58147+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58148+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58149+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58150+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58151+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58152+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58153+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58154+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58155+4 4 4 4 4 4
58156+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58157+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58158+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58159+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58160+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58161+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58162+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58163+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58164+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58165+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58166+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58167+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58168+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58169+4 4 4 4 4 4
58170+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58171+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58172+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58173+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58174+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58175+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58176+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58177+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58178+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58179+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58180+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58181+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58182+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58183+4 4 4 4 4 4
58184+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58185+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58186+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58187+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58188+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58189+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58190+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58191+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58192+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58193+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58194+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58195+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58196+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58197+4 4 4 4 4 4
58198+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58199+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58200+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58201+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58202+4 4 4 4 4 4 4 4 4 3 3 3 0 0 0 0 0 0
58203+0 0 0 0 0 0 0 0 0 0 0 0 3 3 3 4 4 4
58204+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58205+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58206+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58207+4 4 4 4 4 4 4 4 4 4 4 4 1 1 1 0 0 0
58208+0 0 0 3 3 3 4 4 4 4 4 4 4 4 4 4 4 4
58209+4 4 4 4 4 4 4 4 4 2 1 0 2 1 0 3 2 2
58210+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58211+4 4 4 4 4 4
58212+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58213+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58214+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58215+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58216+4 4 4 4 4 4 2 2 2 0 0 0 3 4 3 26 28 28
58217+37 38 37 37 38 37 14 17 19 2 2 2 0 0 0 2 2 2
58218+5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58219+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58220+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58221+4 4 4 4 4 4 3 3 3 0 0 0 1 1 1 6 6 6
58222+2 2 2 0 0 0 3 3 3 4 4 4 4 4 4 4 4 4
58223+4 4 5 3 3 3 1 0 0 0 0 0 1 0 0 0 0 0
58224+1 1 1 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58225+4 4 4 4 4 4
58226+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58227+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58228+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58229+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58230+2 2 2 0 0 0 0 0 0 14 17 19 60 74 84 137 136 137
58231+153 152 153 137 136 137 125 124 125 60 73 81 6 6 6 3 1 0
58232+0 0 0 3 3 3 4 4 4 4 4 4 4 4 4 4 4 4
58233+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58234+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58235+4 4 4 4 4 4 0 0 0 4 4 4 41 54 63 125 124 125
58236+60 73 81 6 6 6 4 0 0 3 3 3 4 4 4 4 4 4
58237+4 4 4 0 0 0 6 9 11 41 54 63 41 65 82 22 30 35
58238+2 2 2 2 1 0 4 4 4 4 4 4 4 4 4 4 4 4
58239+4 4 4 4 4 4
58240+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58241+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58242+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58243+4 4 4 4 4 4 5 5 5 5 5 5 2 2 2 0 0 0
58244+4 0 0 6 6 6 41 54 63 137 136 137 174 174 174 167 166 167
58245+165 164 165 165 164 165 163 162 163 163 162 163 125 124 125 41 54 63
58246+1 1 1 0 0 0 0 0 0 3 3 3 5 5 5 4 4 4
58247+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58248+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 5 5 5
58249+3 3 3 2 0 0 4 0 0 60 73 81 156 155 156 167 166 167
58250+163 162 163 85 115 134 5 7 8 0 0 0 4 4 4 5 5 5
58251+0 0 0 2 5 5 55 98 126 90 154 193 90 154 193 72 125 159
58252+37 51 59 2 0 0 1 1 1 4 5 5 4 4 4 4 4 4
58253+4 4 4 4 4 4
58254+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58255+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58256+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58257+4 4 4 5 5 5 4 4 4 1 1 1 0 0 0 3 3 3
58258+37 38 37 125 124 125 163 162 163 174 174 174 158 157 158 158 157 158
58259+156 155 156 156 155 156 158 157 158 165 164 165 174 174 174 166 165 166
58260+125 124 125 16 19 21 1 0 0 0 0 0 0 0 0 4 4 4
58261+5 5 5 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
58262+4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 1 1 1
58263+0 0 0 0 0 0 37 38 37 153 152 153 174 174 174 158 157 158
58264+174 174 174 163 162 163 37 38 37 4 3 3 4 0 0 1 1 1
58265+0 0 0 22 40 52 101 161 196 101 161 196 90 154 193 101 161 196
58266+64 123 161 14 17 19 0 0 0 4 4 4 4 4 4 4 4 4
58267+4 4 4 4 4 4
58268+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58269+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58270+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
58271+5 5 5 2 2 2 0 0 0 4 0 0 24 26 27 85 115 134
58272+156 155 156 174 174 174 167 166 167 156 155 156 154 153 154 157 156 157
58273+156 155 156 156 155 156 155 154 155 153 152 153 158 157 158 167 166 167
58274+174 174 174 156 155 156 60 74 84 16 19 21 0 0 0 0 0 0
58275+1 1 1 5 5 5 5 5 5 4 4 4 4 4 4 4 4 4
58276+4 4 4 5 5 5 6 6 6 3 3 3 0 0 0 4 0 0
58277+13 16 17 60 73 81 137 136 137 165 164 165 156 155 156 153 152 153
58278+174 174 174 177 184 187 60 73 81 3 1 0 0 0 0 1 1 2
58279+22 30 35 64 123 161 136 185 209 90 154 193 90 154 193 90 154 193
58280+90 154 193 21 29 34 0 0 0 3 2 2 4 4 5 4 4 4
58281+4 4 4 4 4 4
58282+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58283+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58284+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 3 3 3
58285+0 0 0 0 0 0 10 13 16 60 74 84 157 156 157 174 174 174
58286+174 174 174 158 157 158 153 152 153 154 153 154 156 155 156 155 154 155
58287+156 155 156 155 154 155 154 153 154 157 156 157 154 153 154 153 152 153
58288+163 162 163 174 174 174 177 184 187 137 136 137 60 73 81 13 16 17
58289+4 0 0 0 0 0 3 3 3 5 5 5 4 4 4 4 4 4
58290+5 5 5 4 4 4 1 1 1 0 0 0 3 3 3 41 54 63
58291+131 129 131 174 174 174 174 174 174 174 174 174 167 166 167 174 174 174
58292+190 197 201 137 136 137 24 26 27 4 0 0 16 21 25 50 82 103
58293+90 154 193 136 185 209 90 154 193 101 161 196 101 161 196 101 161 196
58294+31 91 132 3 6 7 0 0 0 4 4 4 4 4 4 4 4 4
58295+4 4 4 4 4 4
58296+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58297+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58298+4 4 4 4 4 4 4 4 4 2 2 2 0 0 0 4 0 0
58299+4 0 0 43 57 68 137 136 137 177 184 187 174 174 174 163 162 163
58300+155 154 155 155 154 155 156 155 156 155 154 155 158 157 158 165 164 165
58301+167 166 167 166 165 166 163 162 163 157 156 157 155 154 155 155 154 155
58302+153 152 153 156 155 156 167 166 167 174 174 174 174 174 174 131 129 131
58303+41 54 63 5 5 5 0 0 0 0 0 0 3 3 3 4 4 4
58304+1 1 1 0 0 0 1 0 0 26 28 28 125 124 125 174 174 174
58305+177 184 187 174 174 174 174 174 174 156 155 156 131 129 131 137 136 137
58306+125 124 125 24 26 27 4 0 0 41 65 82 90 154 193 136 185 209
58307+136 185 209 101 161 196 53 118 160 37 112 160 90 154 193 34 86 122
58308+7 12 15 0 0 0 4 4 4 4 4 4 4 4 4 4 4 4
58309+4 4 4 4 4 4
58310+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58311+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58312+4 4 4 3 3 3 0 0 0 0 0 0 5 5 5 37 38 37
58313+125 124 125 167 166 167 174 174 174 167 166 167 158 157 158 155 154 155
58314+156 155 156 156 155 156 156 155 156 163 162 163 167 166 167 155 154 155
58315+137 136 137 153 152 153 156 155 156 165 164 165 163 162 163 156 155 156
58316+156 155 156 156 155 156 155 154 155 158 157 158 166 165 166 174 174 174
58317+167 166 167 125 124 125 37 38 37 1 0 0 0 0 0 0 0 0
58318+0 0 0 24 26 27 60 74 84 158 157 158 174 174 174 174 174 174
58319+166 165 166 158 157 158 125 124 125 41 54 63 13 16 17 6 6 6
58320+6 6 6 37 38 37 80 127 157 136 185 209 101 161 196 101 161 196
58321+90 154 193 28 67 93 6 10 14 13 20 25 13 20 25 6 10 14
58322+1 1 2 4 3 3 4 4 4 4 4 4 4 4 4 4 4 4
58323+4 4 4 4 4 4
58324+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58325+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58326+1 1 1 1 0 0 4 3 3 37 38 37 60 74 84 153 152 153
58327+167 166 167 167 166 167 158 157 158 154 153 154 155 154 155 156 155 156
58328+157 156 157 158 157 158 167 166 167 167 166 167 131 129 131 43 57 68
58329+26 28 28 37 38 37 60 73 81 131 129 131 165 164 165 166 165 166
58330+158 157 158 155 154 155 156 155 156 156 155 156 156 155 156 158 157 158
58331+165 164 165 174 174 174 163 162 163 60 74 84 16 19 21 13 16 17
58332+60 73 81 131 129 131 174 174 174 174 174 174 167 166 167 165 164 165
58333+137 136 137 60 73 81 24 26 27 4 0 0 4 0 0 16 19 21
58334+52 104 138 101 161 196 136 185 209 136 185 209 90 154 193 27 99 146
58335+13 20 25 4 5 7 2 5 5 4 5 7 1 1 2 0 0 0
58336+4 4 4 4 4 4 3 3 3 2 2 2 2 2 2 4 4 4
58337+4 4 4 4 4 4
58338+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58339+4 4 4 4 4 4 4 4 4 4 4 4 3 3 3 0 0 0
58340+0 0 0 13 16 17 60 73 81 137 136 137 174 174 174 166 165 166
58341+158 157 158 156 155 156 157 156 157 156 155 156 155 154 155 158 157 158
58342+167 166 167 174 174 174 153 152 153 60 73 81 16 19 21 4 0 0
58343+4 0 0 4 0 0 6 6 6 26 28 28 60 74 84 158 157 158
58344+174 174 174 166 165 166 157 156 157 155 154 155 156 155 156 156 155 156
58345+155 154 155 158 157 158 167 166 167 167 166 167 131 129 131 125 124 125
58346+137 136 137 167 166 167 167 166 167 174 174 174 158 157 158 125 124 125
58347+16 19 21 4 0 0 4 0 0 10 13 16 49 76 92 107 159 188
58348+136 185 209 136 185 209 90 154 193 26 108 161 22 40 52 6 10 14
58349+2 3 3 1 1 2 1 1 2 4 4 5 4 4 5 4 4 5
58350+4 4 5 2 2 1 0 0 0 0 0 0 0 0 0 2 2 2
58351+4 4 4 4 4 4
58352+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58353+4 4 4 5 5 5 3 3 3 0 0 0 1 0 0 4 0 0
58354+37 51 59 131 129 131 167 166 167 167 166 167 163 162 163 157 156 157
58355+157 156 157 155 154 155 153 152 153 157 156 157 167 166 167 174 174 174
58356+153 152 153 125 124 125 37 38 37 4 0 0 4 0 0 4 0 0
58357+4 3 3 4 3 3 4 0 0 6 6 6 4 0 0 37 38 37
58358+125 124 125 174 174 174 174 174 174 165 164 165 156 155 156 154 153 154
58359+156 155 156 156 155 156 155 154 155 163 162 163 158 157 158 163 162 163
58360+174 174 174 174 174 174 174 174 174 125 124 125 37 38 37 0 0 0
58361+4 0 0 6 9 11 41 54 63 90 154 193 136 185 209 146 190 211
58362+136 185 209 37 112 160 22 40 52 6 10 14 3 6 7 1 1 2
58363+1 1 2 3 3 3 1 1 2 3 3 3 4 4 4 4 4 4
58364+2 2 2 2 0 0 16 19 21 37 38 37 24 26 27 0 0 0
58365+0 0 0 4 4 4
58366+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 5 5 5
58367+4 4 4 0 0 0 0 0 0 0 0 0 26 28 28 120 125 127
58368+158 157 158 174 174 174 165 164 165 157 156 157 155 154 155 156 155 156
58369+153 152 153 153 152 153 167 166 167 174 174 174 174 174 174 125 124 125
58370+37 38 37 4 0 0 0 0 0 4 0 0 4 3 3 4 4 4
58371+4 4 4 4 4 4 5 5 5 4 0 0 4 0 0 4 0 0
58372+4 3 3 43 57 68 137 136 137 174 174 174 174 174 174 165 164 165
58373+154 153 154 153 152 153 153 152 153 153 152 153 163 162 163 174 174 174
58374+174 174 174 153 152 153 60 73 81 6 6 6 4 0 0 4 3 3
58375+32 43 50 80 127 157 136 185 209 146 190 211 146 190 211 90 154 193
58376+28 67 93 28 67 93 40 71 93 3 6 7 1 1 2 2 5 5
58377+50 82 103 79 117 143 26 37 45 0 0 0 3 3 3 1 1 1
58378+0 0 0 41 54 63 137 136 137 174 174 174 153 152 153 60 73 81
58379+2 0 0 0 0 0
58380+4 4 4 4 4 4 4 4 4 4 4 4 6 6 6 2 2 2
58381+0 0 0 2 0 0 24 26 27 60 74 84 153 152 153 174 174 174
58382+174 174 174 157 156 157 154 153 154 156 155 156 154 153 154 153 152 153
58383+165 164 165 174 174 174 177 184 187 137 136 137 43 57 68 6 6 6
58384+4 0 0 2 0 0 3 3 3 5 5 5 5 5 5 4 4 4
58385+4 4 4 4 4 4 4 4 4 5 5 5 6 6 6 4 3 3
58386+4 0 0 4 0 0 24 26 27 60 73 81 153 152 153 174 174 174
58387+174 174 174 158 157 158 158 157 158 174 174 174 174 174 174 158 157 158
58388+60 74 84 24 26 27 4 0 0 4 0 0 17 23 27 59 113 148
58389+136 185 209 191 222 234 146 190 211 136 185 209 31 91 132 7 11 13
58390+22 40 52 101 161 196 90 154 193 6 9 11 3 4 4 43 95 132
58391+136 185 209 172 205 220 55 98 126 0 0 0 0 0 0 2 0 0
58392+26 28 28 153 152 153 177 184 187 167 166 167 177 184 187 165 164 165
58393+37 38 37 0 0 0
58394+4 4 4 4 4 4 5 5 5 5 5 5 1 1 1 0 0 0
58395+13 16 17 60 73 81 137 136 137 174 174 174 174 174 174 165 164 165
58396+153 152 153 153 152 153 155 154 155 154 153 154 158 157 158 174 174 174
58397+177 184 187 163 162 163 60 73 81 16 19 21 4 0 0 4 0 0
58398+4 3 3 4 4 4 5 5 5 5 5 5 4 4 4 5 5 5
58399+5 5 5 5 5 5 5 5 5 4 4 4 4 4 4 5 5 5
58400+6 6 6 4 0 0 4 0 0 4 0 0 24 26 27 60 74 84
58401+166 165 166 174 174 174 177 184 187 165 164 165 125 124 125 24 26 27
58402+4 0 0 4 0 0 5 5 5 50 82 103 136 185 209 172 205 220
58403+146 190 211 136 185 209 26 108 161 22 40 52 7 12 15 44 81 103
58404+71 116 144 28 67 93 37 51 59 41 65 82 100 139 164 101 161 196
58405+90 154 193 90 154 193 28 67 93 0 0 0 0 0 0 26 28 28
58406+125 124 125 167 166 167 163 162 163 153 152 153 163 162 163 174 174 174
58407+85 115 134 4 0 0
58408+4 4 4 5 5 5 4 4 4 1 0 0 4 0 0 34 47 55
58409+125 124 125 174 174 174 174 174 174 167 166 167 157 156 157 153 152 153
58410+155 154 155 155 154 155 158 157 158 166 165 166 167 166 167 154 153 154
58411+125 124 125 26 28 28 4 0 0 4 0 0 4 0 0 5 5 5
58412+5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 1 1 1
58413+0 0 0 0 0 0 1 1 1 4 4 4 4 4 4 4 4 4
58414+5 5 5 5 5 5 4 3 3 4 0 0 4 0 0 6 6 6
58415+37 38 37 131 129 131 137 136 137 37 38 37 0 0 0 4 0 0
58416+4 5 5 43 61 72 90 154 193 172 205 220 146 190 211 136 185 209
58417+90 154 193 28 67 93 13 20 25 43 61 72 71 116 144 44 81 103
58418+2 5 5 7 11 13 59 113 148 101 161 196 90 154 193 28 67 93
58419+13 20 25 6 10 14 0 0 0 13 16 17 60 73 81 137 136 137
58420+166 165 166 158 157 158 156 155 156 154 153 154 167 166 167 174 174 174
58421+60 73 81 4 0 0
58422+4 4 4 4 4 4 0 0 0 3 3 3 60 74 84 174 174 174
58423+174 174 174 167 166 167 163 162 163 155 154 155 157 156 157 155 154 155
58424+156 155 156 163 162 163 167 166 167 158 157 158 125 124 125 37 38 37
58425+4 3 3 4 0 0 4 0 0 6 6 6 6 6 6 5 5 5
58426+4 4 4 4 4 4 4 4 4 1 1 1 0 0 0 2 3 3
58427+10 13 16 7 11 13 1 0 0 0 0 0 2 2 1 4 4 4
58428+4 4 4 4 4 4 4 4 4 5 5 5 4 3 3 4 0 0
58429+4 0 0 7 11 13 13 16 17 4 0 0 3 3 3 34 47 55
58430+80 127 157 146 190 211 172 205 220 136 185 209 136 185 209 136 185 209
58431+28 67 93 22 40 52 55 98 126 55 98 126 21 29 34 7 11 13
58432+50 82 103 101 161 196 101 161 196 35 83 115 13 20 25 2 2 1
58433+1 1 2 1 1 2 37 51 59 131 129 131 174 174 174 174 174 174
58434+167 166 167 163 162 163 163 162 163 167 166 167 174 174 174 125 124 125
58435+16 19 21 4 0 0
58436+4 4 4 4 0 0 4 0 0 60 74 84 174 174 174 174 174 174
58437+158 157 158 155 154 155 155 154 155 156 155 156 155 154 155 158 157 158
58438+167 166 167 165 164 165 131 129 131 60 73 81 13 16 17 4 0 0
58439+4 0 0 4 3 3 6 6 6 4 3 3 5 5 5 4 4 4
58440+4 4 4 3 2 2 0 0 0 0 0 0 7 11 13 45 69 86
58441+80 127 157 71 116 144 43 61 72 7 11 13 0 0 0 1 1 1
58442+4 3 3 4 4 4 4 4 4 4 4 4 6 6 6 5 5 5
58443+3 2 2 4 0 0 1 0 0 21 29 34 59 113 148 136 185 209
58444+146 190 211 136 185 209 136 185 209 136 185 209 136 185 209 136 185 209
58445+68 124 159 44 81 103 22 40 52 13 16 17 43 61 72 90 154 193
58446+136 185 209 59 113 148 21 29 34 3 4 3 1 1 1 0 0 0
58447+24 26 27 125 124 125 163 162 163 174 174 174 166 165 166 165 164 165
58448+163 162 163 125 124 125 125 124 125 125 124 125 125 124 125 26 28 28
58449+4 0 0 4 3 3
58450+3 3 3 0 0 0 24 26 27 153 152 153 177 184 187 158 157 158
58451+156 155 156 156 155 156 155 154 155 155 154 155 165 164 165 174 174 174
58452+155 154 155 60 74 84 26 28 28 4 0 0 4 0 0 3 1 0
58453+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 3 3
58454+2 0 0 0 0 0 0 0 0 32 43 50 72 125 159 101 161 196
58455+136 185 209 101 161 196 101 161 196 79 117 143 32 43 50 0 0 0
58456+0 0 0 2 2 2 4 4 4 4 4 4 3 3 3 1 0 0
58457+0 0 0 4 5 5 49 76 92 101 161 196 146 190 211 146 190 211
58458+136 185 209 136 185 209 136 185 209 136 185 209 136 185 209 90 154 193
58459+28 67 93 13 16 17 37 51 59 80 127 157 136 185 209 90 154 193
58460+22 40 52 6 9 11 3 4 3 2 2 1 16 19 21 60 73 81
58461+137 136 137 163 162 163 158 157 158 166 165 166 167 166 167 153 152 153
58462+60 74 84 37 38 37 6 6 6 13 16 17 4 0 0 1 0 0
58463+3 2 2 4 4 4
58464+3 2 2 4 0 0 37 38 37 137 136 137 167 166 167 158 157 158
58465+157 156 157 154 153 154 157 156 157 167 166 167 174 174 174 125 124 125
58466+37 38 37 4 0 0 4 0 0 4 0 0 4 3 3 4 4 4
58467+4 4 4 4 4 4 5 5 5 5 5 5 1 1 1 0 0 0
58468+0 0 0 16 21 25 55 98 126 90 154 193 136 185 209 101 161 196
58469+101 161 196 101 161 196 136 185 209 136 185 209 101 161 196 55 98 126
58470+14 17 19 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
58471+22 40 52 90 154 193 146 190 211 146 190 211 136 185 209 136 185 209
58472+136 185 209 136 185 209 136 185 209 101 161 196 35 83 115 7 11 13
58473+17 23 27 59 113 148 136 185 209 101 161 196 34 86 122 7 12 15
58474+2 5 5 3 4 3 6 6 6 60 73 81 131 129 131 163 162 163
58475+166 165 166 174 174 174 174 174 174 163 162 163 125 124 125 41 54 63
58476+13 16 17 4 0 0 4 0 0 4 0 0 1 0 0 2 2 2
58477+4 4 4 4 4 4
58478+1 1 1 2 1 0 43 57 68 137 136 137 153 152 153 153 152 153
58479+163 162 163 156 155 156 165 164 165 167 166 167 60 74 84 6 6 6
58480+4 0 0 4 0 0 5 5 5 4 4 4 4 4 4 4 4 4
58481+4 5 5 6 6 6 4 3 3 0 0 0 0 0 0 11 15 18
58482+40 71 93 100 139 164 101 161 196 101 161 196 101 161 196 101 161 196
58483+101 161 196 101 161 196 101 161 196 101 161 196 136 185 209 136 185 209
58484+101 161 196 45 69 86 6 6 6 0 0 0 17 23 27 55 98 126
58485+136 185 209 146 190 211 136 185 209 136 185 209 136 185 209 136 185 209
58486+136 185 209 136 185 209 90 154 193 22 40 52 7 11 13 50 82 103
58487+136 185 209 136 185 209 53 118 160 22 40 52 7 11 13 2 5 5
58488+3 4 3 37 38 37 125 124 125 157 156 157 166 165 166 167 166 167
58489+174 174 174 174 174 174 137 136 137 60 73 81 4 0 0 4 0 0
58490+4 0 0 4 0 0 5 5 5 3 3 3 3 3 3 4 4 4
58491+4 4 4 4 4 4
58492+4 0 0 4 0 0 41 54 63 137 136 137 125 124 125 131 129 131
58493+155 154 155 167 166 167 174 174 174 60 74 84 6 6 6 4 0 0
58494+4 3 3 6 6 6 4 4 4 4 4 4 4 4 4 5 5 5
58495+4 4 4 1 1 1 0 0 0 3 6 7 41 65 82 72 125 159
58496+101 161 196 101 161 196 101 161 196 90 154 193 90 154 193 101 161 196
58497+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 136 185 209
58498+136 185 209 136 185 209 80 127 157 55 98 126 101 161 196 146 190 211
58499+136 185 209 136 185 209 136 185 209 101 161 196 136 185 209 101 161 196
58500+136 185 209 101 161 196 35 83 115 22 30 35 101 161 196 172 205 220
58501+90 154 193 28 67 93 7 11 13 2 5 5 3 4 3 13 16 17
58502+85 115 134 167 166 167 174 174 174 174 174 174 174 174 174 174 174 174
58503+167 166 167 60 74 84 13 16 17 4 0 0 4 0 0 4 3 3
58504+6 6 6 5 5 5 4 4 4 5 5 5 4 4 4 5 5 5
58505+5 5 5 5 5 5
58506+1 1 1 4 0 0 41 54 63 137 136 137 137 136 137 125 124 125
58507+131 129 131 167 166 167 157 156 157 37 38 37 6 6 6 4 0 0
58508+6 6 6 5 5 5 4 4 4 4 4 4 4 5 5 2 2 1
58509+0 0 0 0 0 0 26 37 45 58 111 146 101 161 196 101 161 196
58510+101 161 196 90 154 193 90 154 193 90 154 193 101 161 196 101 161 196
58511+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
58512+101 161 196 136 185 209 136 185 209 136 185 209 146 190 211 136 185 209
58513+136 185 209 101 161 196 136 185 209 136 185 209 101 161 196 136 185 209
58514+101 161 196 136 185 209 136 185 209 136 185 209 136 185 209 16 89 141
58515+7 11 13 2 5 5 2 5 5 13 16 17 60 73 81 154 154 154
58516+174 174 174 174 174 174 174 174 174 174 174 174 163 162 163 125 124 125
58517+24 26 27 4 0 0 4 0 0 4 0 0 5 5 5 5 5 5
58518+4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 5 5 5
58519+5 5 5 4 4 4
58520+4 0 0 6 6 6 37 38 37 137 136 137 137 136 137 131 129 131
58521+131 129 131 153 152 153 131 129 131 26 28 28 4 0 0 4 3 3
58522+6 6 6 4 4 4 4 4 4 4 4 4 0 0 0 0 0 0
58523+13 20 25 51 88 114 90 154 193 101 161 196 101 161 196 90 154 193
58524+90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196
58525+101 161 196 101 161 196 101 161 196 101 161 196 136 185 209 101 161 196
58526+101 161 196 136 185 209 101 161 196 136 185 209 136 185 209 101 161 196
58527+136 185 209 101 161 196 136 185 209 101 161 196 101 161 196 101 161 196
58528+136 185 209 136 185 209 136 185 209 37 112 160 21 29 34 5 7 8
58529+2 5 5 13 16 17 43 57 68 131 129 131 174 174 174 174 174 174
58530+174 174 174 167 166 167 157 156 157 125 124 125 37 38 37 4 0 0
58531+4 0 0 4 0 0 5 5 5 5 5 5 4 4 4 4 4 4
58532+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58533+4 4 4 4 4 4
58534+1 1 1 4 0 0 41 54 63 153 152 153 137 136 137 137 136 137
58535+137 136 137 153 152 153 125 124 125 24 26 27 4 0 0 3 2 2
58536+4 4 4 4 4 4 4 3 3 4 0 0 3 6 7 43 61 72
58537+64 123 161 101 161 196 90 154 193 90 154 193 90 154 193 90 154 193
58538+90 154 193 90 154 193 90 154 193 90 154 193 101 161 196 90 154 193
58539+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
58540+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
58541+136 185 209 101 161 196 101 161 196 136 185 209 136 185 209 101 161 196
58542+101 161 196 90 154 193 28 67 93 13 16 17 7 11 13 3 6 7
58543+37 51 59 125 124 125 163 162 163 174 174 174 167 166 167 166 165 166
58544+167 166 167 131 129 131 60 73 81 4 0 0 4 0 0 4 0 0
58545+3 3 3 5 5 5 6 6 6 4 4 4 4 4 4 4 4 4
58546+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58547+4 4 4 4 4 4
58548+4 0 0 4 0 0 41 54 63 137 136 137 153 152 153 137 136 137
58549+153 152 153 157 156 157 125 124 125 24 26 27 0 0 0 2 2 2
58550+4 4 4 4 4 4 2 0 0 0 0 0 28 67 93 90 154 193
58551+90 154 193 90 154 193 90 154 193 90 154 193 64 123 161 90 154 193
58552+90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196
58553+90 154 193 101 161 196 101 161 196 101 161 196 90 154 193 136 185 209
58554+101 161 196 101 161 196 136 185 209 101 161 196 136 185 209 101 161 196
58555+101 161 196 101 161 196 136 185 209 101 161 196 101 161 196 90 154 193
58556+35 83 115 13 16 17 3 6 7 2 5 5 13 16 17 60 74 84
58557+154 154 154 166 165 166 165 164 165 158 157 158 163 162 163 157 156 157
58558+60 74 84 13 16 17 4 0 0 4 0 0 3 2 2 4 4 4
58559+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58560+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58561+4 4 4 4 4 4
58562+1 1 1 4 0 0 41 54 63 157 156 157 155 154 155 137 136 137
58563+153 152 153 158 157 158 137 136 137 26 28 28 2 0 0 2 2 2
58564+4 4 4 4 4 4 1 0 0 6 10 14 34 86 122 90 154 193
58565+64 123 161 90 154 193 64 123 161 90 154 193 90 154 193 90 154 193
58566+64 123 161 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193
58567+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
58568+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
58569+136 185 209 101 161 196 136 185 209 90 154 193 26 108 161 22 40 52
58570+13 16 17 5 7 8 2 5 5 2 5 5 37 38 37 165 164 165
58571+174 174 174 163 162 163 154 154 154 165 164 165 167 166 167 60 73 81
58572+6 6 6 4 0 0 4 0 0 4 4 4 4 4 4 4 4 4
58573+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58574+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58575+4 4 4 4 4 4
58576+4 0 0 6 6 6 41 54 63 156 155 156 158 157 158 153 152 153
58577+156 155 156 165 164 165 137 136 137 26 28 28 0 0 0 2 2 2
58578+4 4 5 4 4 4 2 0 0 7 12 15 31 96 139 64 123 161
58579+90 154 193 64 123 161 90 154 193 90 154 193 64 123 161 90 154 193
58580+90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193
58581+90 154 193 90 154 193 90 154 193 101 161 196 101 161 196 101 161 196
58582+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 136 185 209
58583+101 161 196 136 185 209 26 108 161 22 40 52 7 11 13 5 7 8
58584+2 5 5 2 5 5 2 5 5 2 2 1 37 38 37 158 157 158
58585+174 174 174 154 154 154 156 155 156 167 166 167 165 164 165 37 38 37
58586+4 0 0 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
58587+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58588+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58589+4 4 4 4 4 4
58590+3 1 0 4 0 0 60 73 81 157 156 157 163 162 163 153 152 153
58591+158 157 158 167 166 167 137 136 137 26 28 28 2 0 0 2 2 2
58592+4 5 5 4 4 4 4 0 0 7 12 15 24 86 132 26 108 161
58593+37 112 160 64 123 161 90 154 193 64 123 161 90 154 193 90 154 193
58594+90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193
58595+90 154 193 101 161 196 90 154 193 101 161 196 101 161 196 101 161 196
58596+101 161 196 101 161 196 101 161 196 136 185 209 101 161 196 136 185 209
58597+90 154 193 35 83 115 13 16 17 13 16 17 7 11 13 3 6 7
58598+5 7 8 6 6 6 3 4 3 2 2 1 30 32 34 154 154 154
58599+167 166 167 154 154 154 154 154 154 174 174 174 165 164 165 37 38 37
58600+6 6 6 4 0 0 6 6 6 4 4 4 4 4 4 4 4 4
58601+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58602+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58603+4 4 4 4 4 4
58604+4 0 0 4 0 0 41 54 63 163 162 163 166 165 166 154 154 154
58605+163 162 163 174 174 174 137 136 137 26 28 28 0 0 0 2 2 2
58606+4 5 5 4 4 5 1 1 2 6 10 14 28 67 93 18 97 151
58607+18 97 151 18 97 151 26 108 161 37 112 160 37 112 160 90 154 193
58608+64 123 161 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196
58609+90 154 193 101 161 196 101 161 196 90 154 193 101 161 196 101 161 196
58610+101 161 196 101 161 196 101 161 196 136 185 209 90 154 193 16 89 141
58611+13 20 25 7 11 13 5 7 8 5 7 8 2 5 5 4 5 5
58612+3 4 3 4 5 5 3 4 3 0 0 0 37 38 37 158 157 158
58613+174 174 174 158 157 158 158 157 158 167 166 167 174 174 174 41 54 63
58614+4 0 0 3 2 2 5 5 5 4 4 4 4 4 4 4 4 4
58615+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58616+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58617+4 4 4 4 4 4
58618+1 1 1 4 0 0 60 73 81 165 164 165 174 174 174 158 157 158
58619+167 166 167 174 174 174 153 152 153 26 28 28 2 0 0 2 2 2
58620+4 5 5 4 4 4 4 0 0 7 12 15 10 87 144 10 87 144
58621+18 97 151 18 97 151 18 97 151 26 108 161 26 108 161 26 108 161
58622+26 108 161 37 112 160 53 118 160 90 154 193 90 154 193 90 154 193
58623+90 154 193 90 154 193 101 161 196 101 161 196 101 161 196 101 161 196
58624+101 161 196 136 185 209 90 154 193 26 108 161 22 40 52 13 16 17
58625+7 11 13 3 6 7 5 7 8 5 7 8 2 5 5 4 5 5
58626+4 5 5 6 6 6 3 4 3 0 0 0 30 32 34 158 157 158
58627+174 174 174 156 155 156 155 154 155 165 164 165 154 153 154 37 38 37
58628+4 0 0 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
58629+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58630+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58631+4 4 4 4 4 4
58632+4 0 0 4 0 0 60 73 81 167 166 167 174 174 174 163 162 163
58633+174 174 174 174 174 174 153 152 153 26 28 28 0 0 0 3 3 3
58634+5 5 5 4 4 4 1 1 2 7 12 15 28 67 93 18 97 151
58635+18 97 151 18 97 151 18 97 151 18 97 151 18 97 151 26 108 161
58636+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
58637+90 154 193 26 108 161 90 154 193 90 154 193 90 154 193 101 161 196
58638+101 161 196 26 108 161 22 40 52 13 16 17 7 11 13 2 5 5
58639+2 5 5 6 6 6 2 5 5 4 5 5 4 5 5 4 5 5
58640+3 4 3 5 5 5 3 4 3 2 0 0 30 32 34 137 136 137
58641+153 152 153 137 136 137 131 129 131 137 136 137 131 129 131 37 38 37
58642+4 0 0 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
58643+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58644+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58645+4 4 4 4 4 4
58646+1 1 1 4 0 0 60 73 81 167 166 167 174 174 174 166 165 166
58647+174 174 174 177 184 187 153 152 153 30 32 34 1 0 0 3 3 3
58648+5 5 5 4 3 3 4 0 0 7 12 15 10 87 144 10 87 144
58649+18 97 151 18 97 151 18 97 151 26 108 161 26 108 161 26 108 161
58650+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
58651+26 108 161 26 108 161 26 108 161 90 154 193 90 154 193 26 108 161
58652+35 83 115 13 16 17 7 11 13 5 7 8 3 6 7 5 7 8
58653+2 5 5 6 6 6 4 5 5 4 5 5 3 4 3 4 5 5
58654+3 4 3 6 6 6 3 4 3 0 0 0 26 28 28 125 124 125
58655+131 129 131 125 124 125 125 124 125 131 129 131 131 129 131 37 38 37
58656+4 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
58657+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58658+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58659+4 4 4 4 4 4
58660+3 1 0 4 0 0 60 73 81 174 174 174 177 184 187 167 166 167
58661+174 174 174 177 184 187 153 152 153 30 32 34 0 0 0 3 3 3
58662+5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 18 97 151
58663+18 97 151 18 97 151 18 97 151 18 97 151 18 97 151 26 108 161
58664+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
58665+26 108 161 90 154 193 26 108 161 26 108 161 24 86 132 13 20 25
58666+7 11 13 13 20 25 22 40 52 5 7 8 3 4 3 3 4 3
58667+4 5 5 3 4 3 4 5 5 3 4 3 4 5 5 3 4 3
58668+4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 125 124 125
58669+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
58670+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
58671+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58672+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58673+4 4 4 4 4 4
58674+1 1 1 4 0 0 60 73 81 174 174 174 177 184 187 174 174 174
58675+174 174 174 190 197 201 157 156 157 30 32 34 1 0 0 3 3 3
58676+5 5 5 4 3 3 4 0 0 7 12 15 10 87 144 10 87 144
58677+18 97 151 19 95 150 19 95 150 18 97 151 18 97 151 26 108 161
58678+18 97 151 26 108 161 26 108 161 26 108 161 26 108 161 90 154 193
58679+26 108 161 26 108 161 26 108 161 22 40 52 2 5 5 3 4 3
58680+28 67 93 37 112 160 34 86 122 2 5 5 3 4 3 3 4 3
58681+3 4 3 3 4 3 3 4 3 2 2 1 3 4 3 4 4 4
58682+4 5 5 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
58683+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
58684+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
58685+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58686+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58687+4 4 4 4 4 4
58688+4 0 0 4 0 0 60 73 81 174 174 174 177 184 187 174 174 174
58689+174 174 174 190 197 201 158 157 158 30 32 34 0 0 0 2 2 2
58690+5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 18 97 151
58691+10 87 144 19 95 150 19 95 150 18 97 151 18 97 151 18 97 151
58692+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
58693+18 97 151 22 40 52 2 5 5 2 2 1 22 40 52 26 108 161
58694+90 154 193 37 112 160 22 40 52 3 4 3 13 20 25 22 30 35
58695+3 6 7 1 1 1 2 2 2 6 9 11 5 5 5 4 3 3
58696+4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 131 129 131
58697+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
58698+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
58699+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58700+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58701+4 4 4 4 4 4
58702+1 1 1 4 0 0 60 73 81 177 184 187 193 200 203 174 174 174
58703+177 184 187 193 200 203 163 162 163 30 32 34 4 0 0 2 2 2
58704+5 5 5 4 3 3 4 0 0 6 10 14 24 86 132 10 87 144
58705+10 87 144 10 87 144 19 95 150 19 95 150 19 95 150 18 97 151
58706+26 108 161 26 108 161 26 108 161 90 154 193 26 108 161 28 67 93
58707+6 10 14 2 5 5 13 20 25 24 86 132 37 112 160 90 154 193
58708+10 87 144 7 12 15 2 5 5 28 67 93 37 112 160 28 67 93
58709+2 2 1 7 12 15 35 83 115 28 67 93 3 6 7 1 0 0
58710+4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
58711+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
58712+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
58713+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58714+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58715+4 4 4 4 4 4
58716+4 0 0 4 0 0 60 73 81 174 174 174 190 197 201 174 174 174
58717+177 184 187 193 200 203 163 162 163 30 32 34 0 0 0 2 2 2
58718+5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 10 87 144
58719+10 87 144 16 89 141 19 95 150 10 87 144 26 108 161 26 108 161
58720+26 108 161 26 108 161 26 108 161 28 67 93 6 10 14 1 1 2
58721+7 12 15 28 67 93 26 108 161 16 89 141 24 86 132 21 29 34
58722+3 4 3 21 29 34 37 112 160 37 112 160 27 99 146 21 29 34
58723+21 29 34 26 108 161 90 154 193 35 83 115 1 1 2 2 0 0
58724+4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 125 124 125
58725+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
58726+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
58727+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58728+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58729+4 4 4 4 4 4
58730+3 1 0 4 0 0 60 73 81 193 200 203 193 200 203 174 174 174
58731+190 197 201 193 200 203 165 164 165 37 38 37 4 0 0 2 2 2
58732+5 5 5 4 3 3 4 0 0 6 10 14 24 86 132 10 87 144
58733+10 87 144 10 87 144 16 89 141 18 97 151 18 97 151 10 87 144
58734+24 86 132 24 86 132 13 20 25 4 5 7 4 5 7 22 40 52
58735+18 97 151 37 112 160 26 108 161 7 12 15 1 1 1 0 0 0
58736+28 67 93 37 112 160 26 108 161 28 67 93 22 40 52 28 67 93
58737+26 108 161 90 154 193 26 108 161 10 87 144 0 0 0 2 0 0
58738+4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
58739+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
58740+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
58741+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58742+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58743+4 4 4 4 4 4
58744+4 0 0 6 6 6 60 73 81 174 174 174 193 200 203 174 174 174
58745+190 197 201 193 200 203 165 164 165 30 32 34 0 0 0 2 2 2
58746+5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 10 87 144
58747+10 87 144 10 87 144 10 87 144 18 97 151 28 67 93 6 10 14
58748+0 0 0 1 1 2 4 5 7 13 20 25 16 89 141 26 108 161
58749+26 108 161 26 108 161 24 86 132 6 9 11 2 3 3 22 40 52
58750+37 112 160 16 89 141 22 40 52 28 67 93 26 108 161 26 108 161
58751+90 154 193 26 108 161 26 108 161 28 67 93 1 1 1 4 0 0
58752+4 4 4 5 5 5 3 3 3 4 0 0 26 28 28 124 126 130
58753+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
58754+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
58755+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58756+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58757+4 4 4 4 4 4
58758+4 0 0 4 0 0 60 73 81 193 200 203 193 200 203 174 174 174
58759+193 200 203 193 200 203 167 166 167 37 38 37 4 0 0 2 2 2
58760+5 5 5 4 4 4 4 0 0 6 10 14 28 67 93 10 87 144
58761+10 87 144 10 87 144 18 97 151 10 87 144 13 20 25 4 5 7
58762+1 1 2 1 1 1 22 40 52 26 108 161 26 108 161 26 108 161
58763+26 108 161 26 108 161 26 108 161 24 86 132 22 40 52 22 40 52
58764+22 40 52 22 40 52 10 87 144 26 108 161 26 108 161 26 108 161
58765+26 108 161 26 108 161 90 154 193 10 87 144 0 0 0 4 0 0
58766+4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
58767+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
58768+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
58769+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58770+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58771+4 4 4 4 4 4
58772+4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
58773+190 197 201 205 212 215 167 166 167 30 32 34 0 0 0 2 2 2
58774+5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 10 87 144
58775+10 87 144 10 87 144 10 87 144 10 87 144 22 40 52 1 1 2
58776+2 0 0 1 1 2 24 86 132 26 108 161 26 108 161 26 108 161
58777+26 108 161 19 95 150 16 89 141 10 87 144 22 40 52 22 40 52
58778+10 87 144 26 108 161 37 112 160 26 108 161 26 108 161 26 108 161
58779+26 108 161 26 108 161 26 108 161 28 67 93 2 0 0 3 1 0
58780+4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 131 129 131
58781+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
58782+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
58783+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58784+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58785+4 4 4 4 4 4
58786+4 0 0 4 0 0 60 73 81 220 221 221 190 197 201 174 174 174
58787+193 200 203 193 200 203 174 174 174 37 38 37 4 0 0 2 2 2
58788+5 5 5 4 4 4 3 2 2 1 1 2 13 20 25 10 87 144
58789+10 87 144 10 87 144 10 87 144 10 87 144 10 87 144 13 20 25
58790+13 20 25 22 40 52 10 87 144 18 97 151 18 97 151 26 108 161
58791+10 87 144 13 20 25 6 10 14 21 29 34 24 86 132 18 97 151
58792+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
58793+26 108 161 90 154 193 18 97 151 13 20 25 0 0 0 4 3 3
58794+4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
58795+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
58796+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
58797+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58798+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58799+4 4 4 4 4 4
58800+4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
58801+190 197 201 220 221 221 167 166 167 30 32 34 1 0 0 2 2 2
58802+5 5 5 4 4 4 4 4 5 2 5 5 4 5 7 13 20 25
58803+28 67 93 10 87 144 10 87 144 10 87 144 10 87 144 10 87 144
58804+10 87 144 10 87 144 18 97 151 10 87 144 18 97 151 18 97 151
58805+28 67 93 2 3 3 0 0 0 28 67 93 26 108 161 26 108 161
58806+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
58807+26 108 161 10 87 144 13 20 25 1 1 2 3 2 2 4 4 4
58808+4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 131 129 131
58809+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
58810+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
58811+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58812+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58813+4 4 4 4 4 4
58814+4 0 0 4 0 0 60 73 81 220 221 221 190 197 201 174 174 174
58815+193 200 203 193 200 203 174 174 174 26 28 28 4 0 0 4 3 3
58816+5 5 5 4 4 4 4 4 4 4 4 5 1 1 2 2 5 5
58817+4 5 7 22 40 52 10 87 144 10 87 144 18 97 151 10 87 144
58818+10 87 144 10 87 144 10 87 144 10 87 144 10 87 144 18 97 151
58819+10 87 144 28 67 93 22 40 52 10 87 144 26 108 161 18 97 151
58820+18 97 151 18 97 151 26 108 161 26 108 161 26 108 161 26 108 161
58821+22 40 52 1 1 2 0 0 0 2 3 3 4 4 4 4 4 4
58822+4 4 4 5 5 5 4 4 4 0 0 0 26 28 28 131 129 131
58823+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
58824+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
58825+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58826+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58827+4 4 4 4 4 4
58828+4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
58829+190 197 201 220 221 221 190 197 201 41 54 63 4 0 0 2 2 2
58830+6 6 6 4 4 4 4 4 4 4 4 5 4 4 5 3 3 3
58831+1 1 2 1 1 2 6 10 14 22 40 52 10 87 144 18 97 151
58832+18 97 151 10 87 144 10 87 144 10 87 144 18 97 151 10 87 144
58833+10 87 144 18 97 151 26 108 161 18 97 151 18 97 151 10 87 144
58834+26 108 161 26 108 161 26 108 161 10 87 144 28 67 93 6 10 14
58835+1 1 2 1 1 2 4 3 3 4 4 5 4 4 4 4 4 4
58836+5 5 5 5 5 5 1 1 1 4 0 0 37 51 59 137 136 137
58837+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
58838+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
58839+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58840+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58841+4 4 4 4 4 4
58842+4 0 0 4 0 0 60 73 81 220 221 221 193 200 203 174 174 174
58843+193 200 203 193 200 203 220 221 221 137 136 137 13 16 17 4 0 0
58844+2 2 2 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5
58845+4 4 5 4 3 3 1 1 2 4 5 7 13 20 25 28 67 93
58846+10 87 144 10 87 144 10 87 144 10 87 144 10 87 144 10 87 144
58847+10 87 144 18 97 151 18 97 151 10 87 144 18 97 151 26 108 161
58848+26 108 161 18 97 151 28 67 93 6 10 14 0 0 0 0 0 0
58849+2 3 3 4 5 5 4 4 5 4 4 4 4 4 4 5 5 5
58850+3 3 3 1 1 1 0 0 0 16 19 21 125 124 125 137 136 137
58851+131 129 131 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
58852+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
58853+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58854+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58855+4 4 4 4 4 4
58856+4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
58857+193 200 203 190 197 201 220 221 221 220 221 221 153 152 153 30 32 34
58858+0 0 0 0 0 0 2 2 2 4 4 4 4 4 4 4 4 4
58859+4 4 4 4 5 5 4 5 7 1 1 2 1 1 2 4 5 7
58860+13 20 25 28 67 93 10 87 144 18 97 151 10 87 144 10 87 144
58861+10 87 144 10 87 144 10 87 144 18 97 151 26 108 161 18 97 151
58862+28 67 93 7 12 15 0 0 0 0 0 0 2 2 1 4 4 4
58863+4 5 5 4 5 5 4 4 4 4 4 4 3 3 3 0 0 0
58864+0 0 0 0 0 0 37 38 37 125 124 125 158 157 158 131 129 131
58865+125 124 125 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
58866+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
58867+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58868+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58869+4 4 4 4 4 4
58870+4 3 3 4 0 0 41 54 63 193 200 203 220 221 221 174 174 174
58871+193 200 203 193 200 203 193 200 203 220 221 221 244 246 246 193 200 203
58872+120 125 127 5 5 5 1 0 0 0 0 0 1 1 1 4 4 4
58873+4 4 4 4 4 4 4 5 5 4 5 5 4 4 5 1 1 2
58874+4 5 7 4 5 7 22 40 52 10 87 144 10 87 144 10 87 144
58875+10 87 144 10 87 144 18 97 151 10 87 144 10 87 144 13 20 25
58876+4 5 7 2 3 3 1 1 2 4 4 4 4 5 5 4 4 4
58877+4 4 4 4 4 4 4 4 4 1 1 1 0 0 0 1 1 2
58878+24 26 27 60 74 84 153 152 153 163 162 163 137 136 137 125 124 125
58879+125 124 125 125 124 125 125 124 125 137 136 137 125 124 125 26 28 28
58880+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
58881+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58882+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58883+4 4 4 4 4 4
58884+4 0 0 6 6 6 26 28 28 156 155 156 220 221 221 220 221 221
58885+174 174 174 193 200 203 193 200 203 193 200 203 205 212 215 220 221 221
58886+220 221 221 167 166 167 60 73 81 7 11 13 0 0 0 0 0 0
58887+3 3 3 4 4 4 4 4 4 4 4 4 4 4 5 4 4 5
58888+4 4 5 1 1 2 1 1 2 4 5 7 22 40 52 10 87 144
58889+10 87 144 10 87 144 10 87 144 22 40 52 4 5 7 1 1 2
58890+1 1 2 4 4 5 4 4 4 4 4 4 4 4 4 4 4 4
58891+5 5 5 2 2 2 0 0 0 4 0 0 16 19 21 60 73 81
58892+137 136 137 167 166 167 158 157 158 137 136 137 131 129 131 131 129 131
58893+125 124 125 125 124 125 131 129 131 155 154 155 60 74 84 5 7 8
58894+0 0 0 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58895+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58896+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58897+4 4 4 4 4 4
58898+5 5 5 4 0 0 4 0 0 60 73 81 193 200 203 220 221 221
58899+193 200 203 193 200 203 193 200 203 193 200 203 205 212 215 220 221 221
58900+220 221 221 220 221 221 220 221 221 137 136 137 43 57 68 6 6 6
58901+4 0 0 1 1 1 4 4 4 4 4 4 4 4 4 4 4 4
58902+4 4 5 4 4 5 3 2 2 1 1 2 2 5 5 13 20 25
58903+22 40 52 22 40 52 13 20 25 2 3 3 1 1 2 3 3 3
58904+4 5 7 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58905+1 1 1 0 0 0 2 3 3 41 54 63 131 129 131 166 165 166
58906+166 165 166 155 154 155 153 152 153 137 136 137 137 136 137 125 124 125
58907+125 124 125 137 136 137 137 136 137 125 124 125 37 38 37 4 3 3
58908+4 3 3 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
58909+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58910+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58911+4 4 4 4 4 4
58912+4 3 3 6 6 6 6 6 6 13 16 17 60 73 81 167 166 167
58913+220 221 221 220 221 221 220 221 221 193 200 203 193 200 203 193 200 203
58914+205 212 215 220 221 221 220 221 221 244 246 246 205 212 215 125 124 125
58915+24 26 27 0 0 0 0 0 0 2 2 2 5 5 5 5 5 5
58916+4 4 4 4 4 4 4 4 4 4 4 5 1 1 2 4 5 7
58917+4 5 7 4 5 7 1 1 2 3 2 2 4 4 5 4 4 4
58918+4 4 4 4 4 4 5 5 5 4 4 4 0 0 0 0 0 0
58919+2 0 0 26 28 28 125 124 125 174 174 174 174 174 174 166 165 166
58920+156 155 156 153 152 153 137 136 137 137 136 137 131 129 131 137 136 137
58921+137 136 137 137 136 137 60 74 84 30 32 34 4 0 0 4 0 0
58922+5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58923+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58924+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58925+4 4 4 4 4 4
58926+5 5 5 6 6 6 4 0 0 4 0 0 6 6 6 26 28 28
58927+125 124 125 174 174 174 220 221 221 220 221 221 220 221 221 193 200 203
58928+205 212 215 220 221 221 205 212 215 220 221 221 220 221 221 244 246 246
58929+193 200 203 60 74 84 13 16 17 4 0 0 0 0 0 3 3 3
58930+5 5 5 5 5 5 4 4 4 4 4 4 4 4 5 3 3 3
58931+1 1 2 3 3 3 4 4 5 4 4 5 4 4 4 4 4 4
58932+5 5 5 5 5 5 2 2 2 0 0 0 0 0 0 13 16 17
58933+60 74 84 174 174 174 193 200 203 174 174 174 167 166 167 163 162 163
58934+153 152 153 153 152 153 137 136 137 137 136 137 153 152 153 137 136 137
58935+125 124 125 41 54 63 24 26 27 4 0 0 4 0 0 5 5 5
58936+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58937+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58938+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58939+4 4 4 4 4 4
58940+4 3 3 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6
58941+6 6 6 37 38 37 131 129 131 220 221 221 220 221 221 220 221 221
58942+193 200 203 193 200 203 220 221 221 205 212 215 220 221 221 244 246 246
58943+244 246 246 244 246 246 174 174 174 41 54 63 0 0 0 0 0 0
58944+0 0 0 4 4 4 5 5 5 5 5 5 4 4 4 4 4 5
58945+4 4 5 4 4 5 4 4 4 4 4 4 6 6 6 6 6 6
58946+3 3 3 0 0 0 2 0 0 13 16 17 60 73 81 156 155 156
58947+220 221 221 193 200 203 174 174 174 165 164 165 163 162 163 154 153 154
58948+153 152 153 153 152 153 158 157 158 163 162 163 137 136 137 60 73 81
58949+13 16 17 4 0 0 4 0 0 4 3 3 4 4 4 4 4 4
58950+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58951+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58952+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58953+4 4 4 4 4 4
58954+5 5 5 4 3 3 4 3 3 6 6 6 6 6 6 6 6 6
58955+6 6 6 6 6 6 6 6 6 37 38 37 167 166 167 244 246 246
58956+244 246 246 220 221 221 205 212 215 205 212 215 220 221 221 193 200 203
58957+220 221 221 244 246 246 244 246 246 244 246 246 137 136 137 37 38 37
58958+3 2 2 0 0 0 1 1 1 5 5 5 5 5 5 4 4 4
58959+4 4 4 4 4 4 4 4 4 5 5 5 4 4 4 1 1 1
58960+0 0 0 5 5 5 43 57 68 153 152 153 193 200 203 220 221 221
58961+177 184 187 174 174 174 167 166 167 166 165 166 158 157 158 157 156 157
58962+158 157 158 166 165 166 156 155 156 85 115 134 13 16 17 4 0 0
58963+4 0 0 4 0 0 5 5 5 5 5 5 4 4 4 4 4 4
58964+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58965+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58966+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58967+4 4 4 4 4 4
58968+5 5 5 4 3 3 6 6 6 6 6 6 4 0 0 6 6 6
58969+6 6 6 6 6 6 6 6 6 6 6 6 13 16 17 60 73 81
58970+177 184 187 220 221 221 220 221 221 220 221 221 205 212 215 220 221 221
58971+220 221 221 205 212 215 220 221 221 244 246 246 244 246 246 205 212 215
58972+125 124 125 30 32 34 0 0 0 0 0 0 2 2 2 5 5 5
58973+4 4 4 4 4 4 4 4 4 1 1 1 0 0 0 1 0 0
58974+37 38 37 131 129 131 205 212 215 220 221 221 193 200 203 174 174 174
58975+174 174 174 174 174 174 167 166 167 165 164 165 166 165 166 167 166 167
58976+158 157 158 125 124 125 37 38 37 4 0 0 4 0 0 4 0 0
58977+4 3 3 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
58978+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58979+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58980+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58981+4 4 4 4 4 4
58982+4 4 4 5 5 5 4 3 3 4 3 3 6 6 6 6 6 6
58983+4 0 0 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6
58984+26 28 28 125 124 125 205 212 215 220 221 221 220 221 221 220 221 221
58985+205 212 215 220 221 221 205 212 215 220 221 221 220 221 221 244 246 246
58986+244 246 246 190 197 201 60 74 84 16 19 21 4 0 0 0 0 0
58987+0 0 0 0 0 0 0 0 0 0 0 0 16 19 21 120 125 127
58988+177 184 187 220 221 221 205 212 215 177 184 187 174 174 174 177 184 187
58989+174 174 174 174 174 174 167 166 167 174 174 174 166 165 166 137 136 137
58990+60 73 81 13 16 17 4 0 0 4 0 0 4 3 3 6 6 6
58991+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58992+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58993+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58994+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58995+4 4 4 4 4 4
58996+5 5 5 4 3 3 5 5 5 4 3 3 6 6 6 4 0 0
58997+6 6 6 6 6 6 4 0 0 6 6 6 4 0 0 6 6 6
58998+6 6 6 6 6 6 37 38 37 137 136 137 193 200 203 220 221 221
58999+220 221 221 205 212 215 220 221 221 205 212 215 205 212 215 220 221 221
59000+220 221 221 220 221 221 244 246 246 166 165 166 43 57 68 2 2 2
59001+0 0 0 4 0 0 16 19 21 60 73 81 157 156 157 202 210 214
59002+220 221 221 193 200 203 177 184 187 177 184 187 177 184 187 174 174 174
59003+174 174 174 174 174 174 174 174 174 157 156 157 60 74 84 24 26 27
59004+4 0 0 4 0 0 4 0 0 6 6 6 4 4 4 4 4 4
59005+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59006+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59007+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59008+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59009+4 4 4 4 4 4
59010+4 4 4 4 4 4 5 5 5 4 3 3 5 5 5 6 6 6
59011+6 6 6 4 0 0 6 6 6 6 6 6 6 6 6 4 0 0
59012+4 0 0 4 0 0 6 6 6 24 26 27 60 73 81 167 166 167
59013+220 221 221 220 221 221 220 221 221 205 212 215 205 212 215 205 212 215
59014+205 212 215 220 221 221 220 221 221 220 221 221 205 212 215 137 136 137
59015+60 74 84 125 124 125 137 136 137 190 197 201 220 221 221 193 200 203
59016+177 184 187 177 184 187 177 184 187 174 174 174 174 174 174 177 184 187
59017+190 197 201 174 174 174 125 124 125 37 38 37 6 6 6 4 0 0
59018+4 0 0 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59019+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59020+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59021+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59022+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59023+4 4 4 4 4 4
59024+4 4 4 4 4 4 5 5 5 5 5 5 4 3 3 6 6 6
59025+4 0 0 6 6 6 6 6 6 6 6 6 4 0 0 6 6 6
59026+6 6 6 6 6 6 4 0 0 4 0 0 6 6 6 6 6 6
59027+125 124 125 193 200 203 244 246 246 220 221 221 205 212 215 205 212 215
59028+205 212 215 193 200 203 205 212 215 205 212 215 220 221 221 220 221 221
59029+193 200 203 193 200 203 205 212 215 193 200 203 193 200 203 177 184 187
59030+190 197 201 190 197 201 174 174 174 190 197 201 193 200 203 190 197 201
59031+153 152 153 60 73 81 4 0 0 4 0 0 4 0 0 3 2 2
59032+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59033+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59034+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59035+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59036+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59037+4 4 4 4 4 4
59038+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 4 3 3
59039+6 6 6 4 3 3 4 3 3 4 3 3 6 6 6 6 6 6
59040+4 0 0 6 6 6 6 6 6 6 6 6 4 0 0 4 0 0
59041+4 0 0 26 28 28 131 129 131 220 221 221 244 246 246 220 221 221
59042+205 212 215 193 200 203 205 212 215 193 200 203 193 200 203 205 212 215
59043+220 221 221 193 200 203 193 200 203 193 200 203 190 197 201 174 174 174
59044+174 174 174 190 197 201 193 200 203 193 200 203 167 166 167 125 124 125
59045+6 6 6 4 0 0 4 0 0 4 3 3 4 4 4 4 4 4
59046+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59047+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59048+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59049+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59050+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59051+4 4 4 4 4 4
59052+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
59053+5 5 5 4 3 3 5 5 5 6 6 6 4 3 3 5 5 5
59054+6 6 6 6 6 6 4 0 0 6 6 6 6 6 6 6 6 6
59055+4 0 0 4 0 0 6 6 6 41 54 63 158 157 158 220 221 221
59056+220 221 221 220 221 221 193 200 203 193 200 203 193 200 203 190 197 201
59057+190 197 201 190 197 201 190 197 201 190 197 201 174 174 174 193 200 203
59058+193 200 203 220 221 221 174 174 174 125 124 125 37 38 37 4 0 0
59059+4 0 0 4 3 3 6 6 6 4 4 4 4 4 4 4 4 4
59060+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59061+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59062+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59063+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59064+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59065+4 4 4 4 4 4
59066+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59067+4 4 4 5 5 5 4 3 3 4 3 3 4 3 3 5 5 5
59068+4 3 3 6 6 6 5 5 5 4 3 3 6 6 6 6 6 6
59069+6 6 6 6 6 6 4 0 0 4 0 0 13 16 17 60 73 81
59070+174 174 174 220 221 221 220 221 221 205 212 215 190 197 201 174 174 174
59071+193 200 203 174 174 174 190 197 201 174 174 174 193 200 203 220 221 221
59072+193 200 203 131 129 131 37 38 37 6 6 6 4 0 0 4 0 0
59073+6 6 6 6 6 6 4 3 3 5 5 5 4 4 4 4 4 4
59074+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59075+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59076+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59077+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59078+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59079+4 4 4 4 4 4
59080+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59081+4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 5 5 5
59082+5 5 5 4 3 3 4 3 3 5 5 5 4 3 3 4 3 3
59083+5 5 5 6 6 6 6 6 6 4 0 0 6 6 6 6 6 6
59084+6 6 6 125 124 125 174 174 174 220 221 221 220 221 221 193 200 203
59085+193 200 203 193 200 203 193 200 203 193 200 203 220 221 221 158 157 158
59086+60 73 81 6 6 6 4 0 0 4 0 0 5 5 5 6 6 6
59087+5 5 5 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
59088+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59089+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59090+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59091+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59092+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59093+4 4 4 4 4 4
59094+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59095+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59096+4 4 4 5 5 5 5 5 5 4 3 3 5 5 5 4 3 3
59097+5 5 5 5 5 5 6 6 6 6 6 6 4 0 0 4 0 0
59098+4 0 0 4 0 0 26 28 28 125 124 125 174 174 174 193 200 203
59099+193 200 203 174 174 174 193 200 203 167 166 167 125 124 125 6 6 6
59100+6 6 6 6 6 6 4 0 0 6 6 6 6 6 6 5 5 5
59101+4 3 3 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
59102+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59103+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59104+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59105+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59106+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59107+4 4 4 4 4 4
59108+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59109+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59110+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
59111+4 3 3 6 6 6 4 0 0 6 6 6 6 6 6 6 6 6
59112+6 6 6 4 0 0 4 0 0 6 6 6 37 38 37 125 124 125
59113+153 152 153 131 129 131 125 124 125 37 38 37 6 6 6 6 6 6
59114+6 6 6 4 0 0 6 6 6 6 6 6 4 3 3 5 5 5
59115+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59116+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59117+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59118+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59119+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59120+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59121+4 4 4 4 4 4
59122+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59123+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59124+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59125+4 4 4 5 5 5 5 5 5 4 3 3 5 5 5 4 3 3
59126+6 6 6 6 6 6 4 0 0 4 0 0 6 6 6 6 6 6
59127+24 26 27 24 26 27 6 6 6 6 6 6 6 6 6 4 0 0
59128+6 6 6 6 6 6 4 0 0 6 6 6 5 5 5 4 3 3
59129+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59130+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59131+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59132+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59133+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59134+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59135+4 4 4 4 4 4
59136+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59137+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59138+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59139+4 4 4 4 4 4 5 5 5 4 3 3 5 5 5 6 6 6
59140+4 0 0 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6
59141+6 6 6 6 6 6 6 6 6 4 0 0 6 6 6 6 6 6
59142+4 0 0 6 6 6 6 6 6 4 3 3 5 5 5 4 4 4
59143+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59144+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59145+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59146+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59147+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59148+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59149+4 4 4 4 4 4
59150+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59151+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59152+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59153+4 4 4 4 4 4 4 4 4 5 5 5 4 3 3 5 5 5
59154+5 5 5 5 5 5 4 0 0 6 6 6 4 0 0 6 6 6
59155+6 6 6 6 6 6 6 6 6 4 0 0 6 6 6 4 0 0
59156+6 6 6 4 3 3 5 5 5 4 3 3 5 5 5 4 4 4
59157+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59158+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59159+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59160+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59161+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59162+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59163+4 4 4 4 4 4
59164+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59165+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59166+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59167+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
59168+4 3 3 6 6 6 4 3 3 6 6 6 6 6 6 6 6 6
59169+4 0 0 6 6 6 4 0 0 6 6 6 6 6 6 6 6 6
59170+6 6 6 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
59171+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59172+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59173+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59174+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59175+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59176+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59177+4 4 4 4 4 4
59178+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59179+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59180+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59181+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59182+4 4 4 5 5 5 4 3 3 5 5 5 4 0 0 6 6 6
59183+6 6 6 4 0 0 6 6 6 6 6 6 4 0 0 6 6 6
59184+4 3 3 5 5 5 5 5 5 4 4 4 4 4 4 4 4 4
59185+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59186+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59187+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59188+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59189+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59190+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59191+4 4 4 4 4 4
59192+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59193+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59194+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59195+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59196+4 4 4 5 5 5 4 3 3 5 5 5 6 6 6 4 3 3
59197+4 3 3 6 6 6 6 6 6 4 3 3 6 6 6 4 3 3
59198+5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59199+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59200+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59201+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59202+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59203+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59204+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59205+4 4 4 4 4 4
59206+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59207+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59208+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59209+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59210+4 4 4 4 4 4 4 4 4 5 5 5 4 3 3 6 6 6
59211+5 5 5 4 3 3 4 3 3 4 3 3 5 5 5 5 5 5
59212+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59213+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59214+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59215+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59216+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59217+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59218+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59219+4 4 4 4 4 4
59220+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59221+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59222+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59223+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59224+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 4 3 3
59225+5 5 5 4 3 3 5 5 5 5 5 5 4 4 4 4 4 4
59226+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59227+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59228+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59229+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59230+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59231+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59232+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59233+4 4 4 4 4 4
59234diff --git a/drivers/xen/events/events_base.c b/drivers/xen/events/events_base.c
59235index 2b8553b..e1a482b 100644
59236--- a/drivers/xen/events/events_base.c
59237+++ b/drivers/xen/events/events_base.c
59238@@ -1564,7 +1564,7 @@ void xen_irq_resume(void)
59239 restore_pirqs();
59240 }
59241
59242-static struct irq_chip xen_dynamic_chip __read_mostly = {
59243+static struct irq_chip xen_dynamic_chip = {
59244 .name = "xen-dyn",
59245
59246 .irq_disable = disable_dynirq,
59247@@ -1578,7 +1578,7 @@ static struct irq_chip xen_dynamic_chip __read_mostly = {
59248 .irq_retrigger = retrigger_dynirq,
59249 };
59250
59251-static struct irq_chip xen_pirq_chip __read_mostly = {
59252+static struct irq_chip xen_pirq_chip = {
59253 .name = "xen-pirq",
59254
59255 .irq_startup = startup_pirq,
59256@@ -1598,7 +1598,7 @@ static struct irq_chip xen_pirq_chip __read_mostly = {
59257 .irq_retrigger = retrigger_dynirq,
59258 };
59259
59260-static struct irq_chip xen_percpu_chip __read_mostly = {
59261+static struct irq_chip xen_percpu_chip = {
59262 .name = "xen-percpu",
59263
59264 .irq_disable = disable_dynirq,
59265diff --git a/drivers/xen/xenfs/xenstored.c b/drivers/xen/xenfs/xenstored.c
59266index fef20db..d28b1ab 100644
59267--- a/drivers/xen/xenfs/xenstored.c
59268+++ b/drivers/xen/xenfs/xenstored.c
59269@@ -24,7 +24,12 @@ static int xsd_release(struct inode *inode, struct file *file)
59270 static int xsd_kva_open(struct inode *inode, struct file *file)
59271 {
59272 file->private_data = (void *)kasprintf(GFP_KERNEL, "0x%p",
59273+#ifdef CONFIG_GRKERNSEC_HIDESYM
59274+ NULL);
59275+#else
59276 xen_store_interface);
59277+#endif
59278+
59279 if (!file->private_data)
59280 return -ENOMEM;
59281 return 0;
59282diff --git a/firmware/Makefile b/firmware/Makefile
59283index e297e1b..6900c31 100644
59284--- a/firmware/Makefile
59285+++ b/firmware/Makefile
59286@@ -35,6 +35,7 @@ fw-shipped-$(CONFIG_BNX2X) += bnx2x/bnx2x-e1-6.2.9.0.fw \
59287 bnx2x/bnx2x-e1h-6.2.9.0.fw \
59288 bnx2x/bnx2x-e2-6.2.9.0.fw
59289 fw-shipped-$(CONFIG_BNX2) += bnx2/bnx2-mips-09-6.2.1a.fw \
59290+ bnx2/bnx2-mips-09-6.2.1b.fw \
59291 bnx2/bnx2-rv2p-09-6.0.17.fw \
59292 bnx2/bnx2-rv2p-09ax-6.0.17.fw \
59293 bnx2/bnx2-mips-06-6.2.1.fw \
59294diff --git a/firmware/WHENCE b/firmware/WHENCE
59295index 0c4d96d..7563083 100644
59296--- a/firmware/WHENCE
59297+++ b/firmware/WHENCE
59298@@ -655,19 +655,20 @@ Driver: BNX2 - Broadcom NetXtremeII
59299 File: bnx2/bnx2-mips-06-6.2.1.fw
59300 File: bnx2/bnx2-rv2p-06-6.0.15.fw
59301 File: bnx2/bnx2-mips-09-6.2.1a.fw
59302+File: bnx2/bnx2-mips-09-6.2.1b.fw
59303 File: bnx2/bnx2-rv2p-09-6.0.17.fw
59304 File: bnx2/bnx2-rv2p-09ax-6.0.17.fw
59305
59306 Licence:
59307-
59308- This file contains firmware data derived from proprietary unpublished
59309- source code, Copyright (c) 2004 - 2010 Broadcom Corporation.
59310-
59311- Permission is hereby granted for the distribution of this firmware data
59312- in hexadecimal or equivalent format, provided this copyright notice is
59313- accompanying it.
59314-
59315-Found in hex form in kernel source.
59316+
59317+ This file contains firmware data derived from proprietary unpublished
59318+ source code, Copyright (c) 2004 - 2010 Broadcom Corporation.
59319+
59320+ Permission is hereby granted for the distribution of this firmware data
59321+ in hexadecimal or equivalent format, provided this copyright notice is
59322+ accompanying it.
59323+
59324+Found in hex form in kernel source.
59325
59326 --------------------------------------------------------------------------
59327
59328diff --git a/firmware/bnx2/bnx2-mips-09-6.2.1b.fw.ihex b/firmware/bnx2/bnx2-mips-09-6.2.1b.fw.ihex
59329new file mode 100644
59330index 0000000..43d7c4f
59331--- /dev/null
59332+++ b/firmware/bnx2/bnx2-mips-09-6.2.1b.fw.ihex
59333@@ -0,0 +1,6496 @@
59334+:10000000080001180800000000005594000000C816
59335+:1000100000000000000000000000000008005594EF
59336+:10002000000000380000565C080000A00800000036
59337+:100030000000574400005694080059200000008436
59338+:100040000000ADD808005744000001C00000AE5CBD
59339+:100050000800321008000000000092580000B01C98
59340+:10006000000000000000000000000000080092589E
59341+:100070000000033C000142740800049008000400E2
59342+:10008000000012FC000145B000000000000000006C
59343+:1000900000000000080016FC00000004000158AC3D
59344+:1000A000080000A80800000000003D00000158B052
59345+:1000B00000000000000000000000000008003D00FB
59346+:1000C00000000030000195B00A000046000000006A
59347+:1000D000000000000000000D636F6D362E322E31DF
59348+:1000E00062000000060201020000000000000003A0
59349+:1000F000000000C800000032000000030000000003
59350+:1001000000000000000000000000000000000000EF
59351+:1001100000000010000001360000EA600000000549
59352+:1001200000000000000000000000000000000008C7
59353+:1001300000000000000000000000000000000000BF
59354+:1001400000000000000000000000000000000000AF
59355+:10015000000000000000000000000000000000009F
59356+:10016000000000020000000000000000000000008D
59357+:10017000000000000000000000000000000000007F
59358+:10018000000000000000000000000010000000005F
59359+:10019000000000000000000000000000000000005F
59360+:1001A000000000000000000000000000000000004F
59361+:1001B000000000000000000000000000000000003F
59362+:1001C000000000000000000000000000000000002F
59363+:1001D000000000000000000000000000000000001F
59364+:1001E0000000000010000003000000000000000DEF
59365+:1001F0000000000D3C020800244256083C030800A1
59366+:1002000024635754AC4000000043202B1480FFFDB2
59367+:10021000244200043C1D080037BD9FFC03A0F021D0
59368+:100220003C100800261001183C1C0800279C5608AA
59369+:100230000E000256000000000000000D27BDFFB4B4
59370+:10024000AFA10000AFA20004AFA30008AFA4000C50
59371+:10025000AFA50010AFA60014AFA70018AFA8001CF0
59372+:10026000AFA90020AFAA0024AFAB0028AFAC002C90
59373+:10027000AFAD0030AFAE0034AFAF0038AFB8003C28
59374+:10028000AFB90040AFBC0044AFBF00480E001544FA
59375+:10029000000000008FBF00488FBC00448FB90040B1
59376+:1002A0008FB8003C8FAF00388FAE00348FAD003078
59377+:1002B0008FAC002C8FAB00288FAA00248FA90020C0
59378+:1002C0008FA8001C8FA700188FA600148FA5001000
59379+:1002D0008FA4000C8FA300088FA200048FA1000040
59380+:1002E00027BD004C3C1B60108F7A5030377B502864
59381+:1002F00003400008AF7A00008F82002427BDFFE092
59382+:10030000AFB00010AFBF0018AFB100148C42000CAA
59383+:100310003C1080008E110100104000348FBF001887
59384+:100320000E000D84000000008F85002024047FFF54
59385+:100330000091202BACB100008E030104960201084D
59386+:1003400000031C003042FFFF00621825ACA300042C
59387+:100350009202010A96030114304200FF3063FFFF4E
59388+:100360000002140000431025ACA200089603010C03
59389+:100370009602010E00031C003042FFFF00621825A8
59390+:10038000ACA3000C960301109602011200031C009E
59391+:100390003042FFFF00621825ACA300108E02011846
59392+:1003A000ACA200148E02011CACA20018148000083C
59393+:1003B0008F820024978200003C0420050044182509
59394+:1003C00024420001ACA3001C0A0000C6A782000062
59395+:1003D0003C0340189442001E00431025ACA2001CB0
59396+:1003E0000E000DB8240400018FBF00188FB1001457
59397+:1003F0008FB000100000102103E0000827BD00208E
59398+:100400003C0780008CE202B834E50100044100089A
59399+:10041000240300013C0208008C42006024420001D9
59400+:100420003C010800AC22006003E0000800601021DD
59401+:100430003C0208008C42005C8CA4002094A30016AF
59402+:100440008CA6000494A5000E24420001ACE40280B6
59403+:100450002463FFFC3C010800AC22005C3C0210005D
59404+:10046000A4E30284A4E5028600001821ACE6028819
59405+:10047000ACE202B803E000080060102127BDFFE0F5
59406+:100480003C028000AFB0001034420100AFBF001C3E
59407+:10049000AFB20018AFB100148C43000094450008BF
59408+:1004A0002462FE002C42038110400003000381C23D
59409+:1004B0000A00010226100004240201001462000553
59410+:1004C0003C1180003C02800890420004305000FF44
59411+:1004D0003C11800036320100964300143202000FB6
59412+:1004E00000021500004310253C0308008C63004403
59413+:1004F00030A40004AE220080246300013C01080007
59414+:10050000AC2300441080000730A200028FBF001C03
59415+:100510008FB200188FB100148FB000100A0000CE07
59416+:1005200027BD00201040002D0000182130A20080BF
59417+:1005300010400005362200708E44001C0E000C672F
59418+:10054000240500A0362200708C4400008F82000C2D
59419+:10055000008210232C43012C10600004AF82001095
59420+:10056000240300010A000145AF84000C8E42000400
59421+:100570003C036020AF84000CAC6200143C02080015
59422+:100580008C42005850400015000018218C62000475
59423+:10059000240301FE304203FF144300100000182121
59424+:1005A0002E020004104000032E0200080A00014041
59425+:1005B0000000802114400003000000000A000140F8
59426+:1005C0002610FFF90000000D2402000202021004B0
59427+:1005D0003C036000AC626914000018218FBF001C4E
59428+:1005E0008FB200188FB100148FB00010006010217E
59429+:1005F00003E0000827BD00203C0480008C8301003C
59430+:1006000024020100506200033C0280080000000D3B
59431+:100610003C02800890430004000010213063000F6A
59432+:1006200000031D0003E00008AC8300800004188074
59433+:100630002782FF9C00621821000410C00044102390
59434+:100640008C640000000210C03C030800246356E4E0
59435+:10065000004310213C038000AC64009003E00008DC
59436+:10066000AF8200243C0208008C42011410400019A3
59437+:100670003084400030A2007F000231C03C02020002
59438+:100680001080001400A218253C026020AC43001426
59439+:100690003C0408008C8456B83C0308008C630110AD
59440+:1006A0003C02800024050900AC4500200086202182
59441+:1006B000246300013C028008AC4400643C01080053
59442+:1006C000AC2301103C010800AC2456B803E000083C
59443+:1006D000000000003C02602003E00008AC4500146C
59444+:1006E00003E000080000102103E0000800001021D2
59445+:1006F00030A2000810400008240201003C0208005B
59446+:100700008C42010C244200013C010800AC22010C87
59447+:1007100003E0000800000000148200080000000050
59448+:100720003C0208008C4200FC244200013C0108000D
59449+:10073000AC2200FC0A0001A330A200203C02080009
59450+:100740008C420084244200013C010800AC22008459
59451+:1007500030A200201040000830A200103C02080027
59452+:100760008C420108244200013C010800AC2201082F
59453+:1007700003E0000800000000104000080000000036
59454+:100780003C0208008C420104244200013C010800A4
59455+:10079000AC22010403E00008000000003C02080055
59456+:1007A0008C420100244200013C010800AC220100FF
59457+:1007B00003E000080000000027BDFFE0AFB1001417
59458+:1007C0003C118000AFB20018AFBF001CAFB00010EA
59459+:1007D0003632010096500008320200041040000733
59460+:1007E000320300028FBF001C8FB200188FB10014BB
59461+:1007F0008FB000100A0000CE27BD00201060000B53
59462+:10080000020028218E2401000E00018A0000000051
59463+:100810003202008010400003240500A10E000C6786
59464+:100820008E44001C0A0001E3240200018E2301040F
59465+:100830008F82000810430006020028218E24010048
59466+:100840000E00018A000000008E220104AF82000821
59467+:10085000000010218FBF001C8FB200188FB1001450
59468+:100860008FB0001003E0000827BD00202C82000498
59469+:1008700014400002000018212483FFFD240200021E
59470+:10088000006210043C03600003E00008AC626914DD
59471+:1008900027BDFFE0AFBF001CAFB20018AFB100141E
59472+:1008A000AFB000103C048000948201083043700017
59473+:1008B000240220001062000A2862200154400052E5
59474+:1008C0008FBF001C24024000106200482402600018
59475+:1008D0001062004A8FBF001C0A0002518FB200183C
59476+:1008E00034820100904300098C5000189451000C90
59477+:1008F000240200091062001C0000902128620009F7
59478+:10090000144000218F8200242402000A5062001249
59479+:10091000323100FF2402000B1062000F00000000C3
59480+:100920002402000C146200188F8200243C0208008C
59481+:100930008C4256B824030900AC83002000501021DB
59482+:100940003C038008AC6200643C010800AC2256B84D
59483+:100950000A0002508FBF001C0E0001E900102602A1
59484+:100960000A0002308F8200240E0001E900102602E6
59485+:100970003C0380089462001A8C72000C3042FFFF26
59486+:10098000020280258F8200248C42000C5040001E01
59487+:100990008FBF001C0E000D84000000003C02800090
59488+:1009A00034420100944300088F82002400031C009D
59489+:1009B0009444001E8F82002000641825AC50000073
59490+:1009C00024040001AC510004AC520008AC40000CFF
59491+:1009D000AC400010AC400014AC4000180E000DB844
59492+:1009E000AC43001C0A0002508FBF001C0E000440E4
59493+:1009F000000000000A0002508FBF001C0E000C9F78
59494+:100A0000000000008FBF001C8FB200188FB10014CF
59495+:100A10008FB000100000102103E0000827BD002067
59496+:100A200027BDFFD8AFB400203C036010AFBF002447
59497+:100A3000AFB3001CAFB20018AFB10014AFB00010DC
59498+:100A40008C6450002402FF7F3C1408002694563822
59499+:100A5000008220243484380CAC6450003C028000B6
59500+:100A6000240300370E0014B0AC4300083C07080014
59501+:100A700024E70618028010212404001D2484FFFFAF
59502+:100A8000AC4700000481FFFD244200043C02080042
59503+:100A9000244207C83C010800AC2256403C02080032
59504+:100AA000244202303C030800246306203C04080072
59505+:100AB000248403B43C05080024A506F03C06080085
59506+:100AC00024C62C9C3C010800AC2256803C02080045
59507+:100AD000244205303C010800AC2756843C01080044
59508+:100AE000AC2656943C010800AC23569C3C010800FF
59509+:100AF000AC2456A03C010800AC2556A43C010800DB
59510+:100B0000AC2256A83C010800AC23563C3C0108002E
59511+:100B1000AC2456443C010800AC2056603C0108005F
59512+:100B2000AC2556643C010800AC2056703C0108001E
59513+:100B3000AC27567C3C010800AC2656903C010800CE
59514+:100B4000AC2356980E00056E00000000AF80000C2C
59515+:100B50003C0280008C5300008F8300043C0208009C
59516+:100B60008C420020106200213262000700008821C0
59517+:100B70002792FF9C3C100800261056E43C02080017
59518+:100B80008C42002024050001022518040043202483
59519+:100B90008F820004004310245044000C26310001D1
59520+:100BA00010800008AF9000248E4300003C028000BB
59521+:100BB000AC4300900E000D4BAE05000C0A0002C1C4
59522+:100BC00026310001AE00000C263100012E22000269
59523+:100BD000261000381440FFE9265200043C020800A9
59524+:100BE0008C420020AF820004326200071040FFD91F
59525+:100BF0003C028000326200011040002D326200028F
59526+:100C00003C0580008CA2010000002021ACA2002045
59527+:100C10008CA301042C42078110400008ACA300A85B
59528+:100C200094A2010824032000304270001443000302
59529+:100C30003C02800890420005304400FF0E0001593C
59530+:100C4000000000003C0280009042010B304300FF96
59531+:100C50002C62001E54400004000310800E00018628
59532+:100C60000A0002EC00000000005410218C42000039
59533+:100C70000040F80900000000104000043C02800021
59534+:100C80008C4301043C026020AC4300143C02080089
59535+:100C90008C4200343C0440003C03800024420001AC
59536+:100CA000AC6401383C010800AC220034326200021E
59537+:100CB00010400010326200043C1080008E0201409F
59538+:100CC000000020210E000159AE0200200E00038317
59539+:100CD000000000003C024000AE0201783C02080027
59540+:100CE0008C420038244200013C010800AC2200384C
59541+:100CF000326200041040FF973C0280003C108000EC
59542+:100D00008E020180000020210E000159AE02002059
59543+:100D10008E03018024020F00546200073C02800809
59544+:100D20008E0201883C0300E03042FFFF00431025A3
59545+:100D30000A000328AE020080344200809042000086
59546+:100D400024030050304200FF14430007000000005D
59547+:100D50000E000362000000001440000300000000C9
59548+:100D60000E000971000000003C0208008C42003CAB
59549+:100D70003C0440003C03800024420001AC6401B804
59550+:100D80003C010800AC22003C0A0002A33C028000A7
59551+:100D90003C02900034420001008220253C02800089
59552+:100DA000AC4400203C0380008C6200200440FFFE25
59553+:100DB0000000000003E00008000000003C0280008A
59554+:100DC000344300010083202503E00008AC440020E8
59555+:100DD00027BDFFE0AFB10014AFB000100080882144
59556+:100DE000AFBF00180E00033230B000FF8F83FF94B6
59557+:100DF000022020219062002502028025A07000259B
59558+:100E00008C7000183C0280000E00033D020280241A
59559+:100E10001600000B8FBF00183C0480008C8201F884
59560+:100E20000440FFFE348201C024030002AC510000E4
59561+:100E3000A04300043C021000AC8201F88FBF0018F0
59562+:100E40008FB100148FB0001003E0000827BD002010
59563+:100E500027BDFFE83C028000AFBF00103442018094
59564+:100E6000944300048C4400083063020010600005C5
59565+:100E7000000028210E00100C000000000A0003787A
59566+:100E8000240500013C02FF000480000700821824B2
59567+:100E90003C02040014620004240500018F82FF94C8
59568+:100EA00090420008240500018FBF001000A010210F
59569+:100EB00003E0000827BD00188F82FF982405000179
59570+:100EC000A040001A3C028000344201400A00034264
59571+:100ED0008C4400008F85FF9427BDFFE0AFBF001C4E
59572+:100EE000AFB20018AFB10014AFB0001090A2000074
59573+:100EF000304400FF38830020388200300003182B74
59574+:100F00000002102B0062182410600003240200501D
59575+:100F1000148200A88FBF001C90A20005304200017F
59576+:100F2000104000A48FBF001C3C02800034420140EE
59577+:100F3000904200082443FFFF2C6200051040009EF1
59578+:100F40008FB20018000310803C030800246355ACE6
59579+:100F5000004310218C420000004000080000000007
59580+:100F60003C028000345101400E0003328E24000008
59581+:100F70008F92FF948E2200048E50000C1602000205
59582+:100F800024020001AE42000C0E00033D8E2400003E
59583+:100F90008E220004145000068FBF001C8FB2001870
59584+:100FA0008FB100148FB000100A000F7827BD002009
59585+:100FB0008E42000C0A000419000000003C0480006E
59586+:100FC0003482014094A300108C4200043063FFFF80
59587+:100FD0001443001C0000000024020001A4A2001021
59588+:100FE0008C8202380441000F3C0380003C02003F29
59589+:100FF0003448F0003C0760003C06FFC08CE22BBC8C
59590+:1010000000461824004810240002130200031D8229
59591+:10101000106200583C0280008C8202380440FFF7C6
59592+:101020003C038000346201408C44000034620200C2
59593+:10103000AC4400003C021000AC6202380A00043BE1
59594+:101040008FBF001C94A200100A00041900000000C9
59595+:10105000240200201482000F3C0280003C03800028
59596+:1010600094A20012346301408C6300043042FFFFFD
59597+:10107000146200050000000024020001A4A2001276
59598+:101080000A0004028FBF001C94A200120A00041977
59599+:1010900000000000345101400E0003328E24000095
59600+:1010A0008F92FF948E230004964200123050FFFF6F
59601+:1010B0001603000224020001A64200120E00033DA6
59602+:1010C0008E2400008E220004160200068FBF001C32
59603+:1010D0008FB200188FB100148FB000100A00037C8B
59604+:1010E00027BD0020964200120A00041900000000EB
59605+:1010F0003C03800094A20014346301408C6300041C
59606+:101100003042FFFF14620008240200018FBF001C60
59607+:101110008FB200188FB100148FB00010A4A2001479
59608+:101120000A00146327BD002094A20014144000217B
59609+:101130008FBF001C0A000435000000003C03800043
59610+:1011400094A20016346301408C6300043042FFFF18
59611+:101150001462000D240200018FBF001C8FB2001822
59612+:101160008FB100148FB00010A4A200160A000B1457
59613+:1011700027BD00209442007824420004A4A200105D
59614+:101180000A00043B8FBF001C94A200162403000138
59615+:101190003042FFFF144300078FBF001C3C020800D1
59616+:1011A0008C420070244200013C010800AC22007017
59617+:1011B0008FBF001C8FB200188FB100148FB00010C9
59618+:1011C00003E0000827BD002027BDFFD8AFB20018FC
59619+:1011D0008F92FF94AFB10014AFBF0020AFB3001CDB
59620+:1011E000AFB000103C028000345101008C5001006F
59621+:1011F0009242000092230009304400FF2402001FA5
59622+:10120000106200AB28620020104000192402003850
59623+:101210002862000A1040000D2402000B286200081A
59624+:101220001040002E8F820024046001042862000216
59625+:101230001440002A8F820024240200061062002637
59626+:101240008FBF00200A00055F8FB3001C1062006092
59627+:101250002862000B144000FA8FBF00202402000E09
59628+:10126000106200788F8200240A00055F8FB3001C93
59629+:10127000106200D2286200391040000A2402008067
59630+:1012800024020036106200E528620037104000C3D7
59631+:1012900024020035106200D98FBF00200A00055FCC
59632+:1012A0008FB3001C1062002D2862008110400006E0
59633+:1012B000240200C824020039106200C98FBF002038
59634+:1012C0000A00055F8FB3001C106200A28FBF0020D0
59635+:1012D0000A00055F8FB3001C8F8200248C42000C33
59636+:1012E000104000D78FBF00200E000D8400000000CA
59637+:1012F0003C038000346301008C6200008F85002075
59638+:10130000946700089466000CACA200008C64000492
59639+:101310008F82002400063400ACA400049448001E10
59640+:101320008C62001800073C0000E83825ACA20008D9
59641+:101330008C62001C24040001ACA2000C9062000A24
59642+:1013400000C23025ACA60010ACA00014ACA0001860
59643+:10135000ACA7001C0A00051D8FBF00208F8200244F
59644+:101360008C42000C104000B68FBF00200E000D8490
59645+:10137000000000008F820024962400089625000CAF
59646+:101380009443001E000422029626000E8F82002045
59647+:10139000000426000083202500052C003C0300806B
59648+:1013A00000A6282500832025AC400000AC400004A6
59649+:1013B000AC400008AC40000CAC450010AC40001440
59650+:1013C000AC400018AC44001C0A00051C24040001B9
59651+:1013D0009622000C14400018000000009242000504
59652+:1013E0003042001014400014000000000E000332D0
59653+:1013F0000200202192420005020020213442001008
59654+:101400000E00033DA242000592420000240300208A
59655+:10141000304200FF10430089020020218FBF0020CE
59656+:101420008FB3001C8FB200188FB100148FB0001062
59657+:101430000A00107527BD00280000000D0A00055E97
59658+:101440008FBF00208C42000C1040007D8FBF002019
59659+:101450000E000D84000000008E2200048F84002006
59660+:101460009623000CAC8200003C0280089445002CBE
59661+:101470008F82002400031C0030A5FFFF9446001E4D
59662+:101480003C02400E0065182500C23025AC830004E4
59663+:10149000AC800008AC80000CAC800010AC80001464
59664+:1014A000AC800018AC86001C0A00051C2404000156
59665+:1014B0000E000332020020218F93FF9802002021AA
59666+:1014C0000E00033DA660000C020020210E00034226
59667+:1014D000240500018F8200248C42000C104000582B
59668+:1014E0008FBF00200E000D84000000009622000C2B
59669+:1014F0008F83002000021400AC700000AC62000476
59670+:10150000AC6000088E4400388F820024AC64000C6C
59671+:101510008E46003C9445001E3C02401FAC66001005
59672+:1015200000A228258E62000424040001AC6200148D
59673+:10153000AC600018AC65001C8FBF00208FB3001C8E
59674+:101540008FB200188FB100148FB000100A000DB8D0
59675+:1015500027BD0028240200201082003A8FB3001C0F
59676+:101560000E000F5E00000000104000358FBF00200D
59677+:101570003C0480008C8201F80440FFFE348201C0EC
59678+:1015800024030002AC500000A04300043C02100001
59679+:10159000AC8201F80A00055E8FBF00200200202106
59680+:1015A0008FBF00208FB3001C8FB200188FB10014C2
59681+:1015B0008FB000100A000EA727BD00289625000C4A
59682+:1015C000020020218FBF00208FB3001C8FB20018B3
59683+:1015D0008FB100148FB000100A000ECC27BD002878
59684+:1015E000020020218FB3001C8FB200188FB10014AD
59685+:1015F0008FB000100A000EF727BD00289225000DBD
59686+:10160000020020218FB3001C8FB200188FB100148C
59687+:101610008FB000100A000F4827BD002802002021CB
59688+:101620008FBF00208FB3001C8FB200188FB1001441
59689+:101630008FB000100A000F1F27BD00288FBF0020A9
59690+:101640008FB3001C8FB200188FB100148FB0001040
59691+:1016500003E0000827BD00283C0580008CA202782A
59692+:101660000440FFFE34A2024024030002AC44000008
59693+:10167000A04300043C02100003E00008ACA2027882
59694+:10168000A380001803E00008A38000193C03800039
59695+:101690008C6202780440FFFE8F82001CAC62024024
59696+:1016A00024020002A06202443C02100003E0000891
59697+:1016B000AC6202783C02600003E000088C425404F3
59698+:1016C0009083003024020005008040213063003FF9
59699+:1016D0000000482114620005000050219082004C57
59700+:1016E0009483004E304900FF306AFFFFAD00000CCC
59701+:1016F000AD000010AD000024950200148D05001C03
59702+:101700008D0400183042FFFF004910230002110031
59703+:10171000000237C3004038210086202300A2102B8E
59704+:101720000082202300A72823AD05001CAD0400186B
59705+:10173000A5090014A5090020A50A001603E0000869
59706+:10174000A50A002203E000080000000027BDFFD822
59707+:10175000AFB200183C128008AFB40020AFB3001C39
59708+:10176000AFB10014AFBF0024AFB00010365101007C
59709+:101770003C0260008C4254049222000C3C1408008D
59710+:10178000929400F7304300FF2402000110620032FF
59711+:101790000080982124020002146200353650008037
59712+:1017A0000E00143D000000009202004C2403FF8054
59713+:1017B0003C0480003042007F000211C024420240FD
59714+:1017C0000262102100431824AC8300949245000863
59715+:1017D0009204004C3042007F3C03800614850007D1
59716+:1017E000004380212402FFFFA22200112402FFFFF8
59717+:1017F000A62200120A0005D22402FFFF9602002052
59718+:10180000A222001196020022A62200128E020024BB
59719+:101810003C048008AE2200143485008090A2004C65
59720+:1018200034830100A06200108CA2003CAC6200185E
59721+:101830008C820068AC6200F48C820064AC6200F0C0
59722+:101840008C82006CAC6200F824020001A0A2006847
59723+:101850000A0005EE3C0480080E001456000000004B
59724+:1018600036420080A04000680A0005EE3C04800873
59725+:10187000A2000068A20000690A0006293C02800854
59726+:10188000348300808C62003834850100AC62006CC7
59727+:1018900024020001A062006990A200D59083000894
59728+:1018A000305100FF3072007F12320019001111C058
59729+:1018B00024420240026210212403FF8000431824C6
59730+:1018C0003C048000AC8300943042007F3C038006DF
59731+:1018D000004380218E02000C1040000D02002021E8
59732+:1018E0000E00057E0000000026220001305100FF9E
59733+:1018F0009203003C023410260002102B0002102339
59734+:101900003063007F022288240A0005F8A203003C0D
59735+:101910003C088008350401008C8200E03507008017
59736+:10192000ACE2003C8C8200E0AD02000090E5004C8F
59737+:10193000908600D590E3004C908400D52402FF806F
59738+:1019400000A228243063007F308400FF00A62825F1
59739+:101950000064182A1060000230A500FF38A500803E
59740+:10196000A0E5004CA10500093C0280089043000E50
59741+:10197000344400803C058000A043000A8C8300189A
59742+:101980003C027FFF3442FFFF00621824AC83001842
59743+:101990008CA201F80440FFFE00000000ACB301C0BF
59744+:1019A0008FBF00248FB400208FB3001C8FB20018AB
59745+:1019B0008FB100148FB0001024020002A0A201C455
59746+:1019C00027BD00283C02100003E00008ACA201F88B
59747+:1019D00090A2000024420001A0A200003C030800E5
59748+:1019E0008C6300F4304200FF144300020080302179
59749+:1019F000A0A0000090A200008F84001C000211C073
59750+:101A00002442024024830040008220212402FF80DF
59751+:101A1000008220243063007F3C02800A006218218B
59752+:101A20003C028000AC44002403E00008ACC300008A
59753+:101A300094820006908300058C85000C8C86001033
59754+:101A40008C8700188C88001C8C8400203C010800C6
59755+:101A5000A42256C63C010800A02356C53C0108003C
59756+:101A6000AC2556CC3C010800AC2656D03C01080001
59757+:101A7000AC2756D83C010800AC2856DC3C010800D5
59758+:101A8000AC2456E003E00008000000003C0280089F
59759+:101A9000344201008C4400343C038000346504006F
59760+:101AA000AC6400388C420038AF850028AC62003C42
59761+:101AB0003C020005AC6200300000000000000000A5
59762+:101AC00003E00008000000003C020006308400FF34
59763+:101AD000008220253C028000AC4400300000000061
59764+:101AE00000000000000000003C0380008C62000049
59765+:101AF000304200101040FFFD3462040003E0000893
59766+:101B0000AF82002894C200003C080800950800CA73
59767+:101B100030E7FFFF0080482101021021A4C200002D
59768+:101B200094C200003042FFFF00E2102B544000013D
59769+:101B3000A4C7000094A200003C0308008C6300CC02
59770+:101B400024420001A4A2000094A200003042FFFF42
59771+:101B5000144300073C0280080107102BA4A00000DA
59772+:101B60005440000101003821A4C700003C02800855
59773+:101B7000344601008CC3002894A200003C0480007D
59774+:101B80003042FFFE000210C000621021AC82003C17
59775+:101B90008C82003C006218231860000400000000E2
59776+:101BA0008CC200240A0006BA244200018CC2002420
59777+:101BB000AC8200383C020050344200103C038000EC
59778+:101BC000AC620030000000000000000000000000D7
59779+:101BD0008C620000304200201040FFFD0000000039
59780+:101BE00094A200003C04800030420001000210C0BA
59781+:101BF000004410218C430400AD2300008C420404F7
59782+:101C0000AD2200043C02002003E00008AC8200305A
59783+:101C100027BDFFE0AFB20018AFB10014AFB00010A5
59784+:101C2000AFBF001C94C2000000C080213C1208001D
59785+:101C3000965200C624420001A6020000960300004E
59786+:101C400094E2000000E03021144300058FB1003021
59787+:101C50000E00068F024038210A0006F10000000045
59788+:101C60008C8300048C82000424420040046100073D
59789+:101C7000AC8200048C8200040440000400000000D8
59790+:101C80008C82000024420001AC8200009602000019
59791+:101C90003042FFFF50520001A600000096220000D3
59792+:101CA00024420001A62200003C02800834420100C8
59793+:101CB000962300009442003C144300048FBF001C94
59794+:101CC00024020001A62200008FBF001C8FB2001862
59795+:101CD0008FB100148FB0001003E0000827BD002072
59796+:101CE00027BDFFE03C028008AFBF0018344201006E
59797+:101CF0008C4800343C03800034690400AC68003830
59798+:101D00008C42003830E700FFAF890028AC62003C0D
59799+:101D10003C020005AC620030000000000000000042
59800+:101D200000000000000000000000000000000000B3
59801+:101D30008C82000C8C82000C97830016AD22000070
59802+:101D40008C82001000604021AD2200048C820018BB
59803+:101D5000AD2200088C82001CAD22000C8CA2001465
59804+:101D6000AD2200108C820020AD220014908200056C
59805+:101D7000304200FF00021200AD2200188CA20018B1
59806+:101D8000AD22001C8CA2000CAD2200208CA2001001
59807+:101D9000AD2200248CA2001CAD2200288CA20020C1
59808+:101DA000AD22002C3402FFFFAD260030AD20003400
59809+:101DB000506200013408FFFFAD28003850E00011E8
59810+:101DC0003C0280083C048008348401009482005066
59811+:101DD0003042FFFFAD22003C9483004494850044D0
59812+:101DE000240200013063FFFF000318C200641821C1
59813+:101DF0009064006430A5000700A210040A00075C8C
59814+:101E00000044102534420100AD20003C94430044BE
59815+:101E1000944400443063FFFF000318C2006218219D
59816+:101E200030840007906500642402000100821004E1
59817+:101E30000002102700451024A0620064000000008A
59818+:101E400000000000000000003C0200063442004098
59819+:101E50003C038000AC620030000000000000000085
59820+:101E6000000000008C620000304200101040FFFDB6
59821+:101E70003C06800834C201503463040034C7014A70
59822+:101E800034C4013434C5014034C60144AFA200104B
59823+:101E90000E0006D2AF8300288FBF001803E00008B1
59824+:101EA00027BD00208F8300143C0608008CC600E884
59825+:101EB0008F82001C30633FFF000319800046102111
59826+:101EC000004310212403FF80004318243C068000B7
59827+:101ED000ACC300283042007F3C03800C004330211B
59828+:101EE00090C2000D30A500FF0000382134420010E0
59829+:101EF000A0C2000D8F8900143C028008344201000A
59830+:101F00009443004400091382304800032402000176
59831+:101F1000A4C3000E1102000B2902000210400005AC
59832+:101F2000240200021100000C240300010A0007A48F
59833+:101F30000000182111020006000000000A0007A49A
59834+:101F4000000018218CC2002C0A0007A424430001C1
59835+:101F50008CC20014244300018CC200180043102BD3
59836+:101F60005040000A240700012402002714A20003A5
59837+:101F70003C0380080A0007B1240700013463010014
59838+:101F80009462004C24420001A462004C00091382B8
59839+:101F9000304300032C620002104000090080282119
59840+:101FA000146000040000000094C200340A0007C15D
59841+:101FB0003046FFFF8CC600380A0007C10080282188
59842+:101FC000000030213C040800248456C00A000706A3
59843+:101FD0000000000027BDFF90AFB60068AFB50064F9
59844+:101FE000AFB40060AFB3005CAFB20058AFB1005403
59845+:101FF000AFBF006CAFB000508C9000000080B021EB
59846+:102000003C0208008C4200E8960400328F83001CDA
59847+:102010002414FF8030843FFF0062182100042180D7
59848+:1020200000641821007410243C13800000A090214B
59849+:1020300090A50000AE620028920400323C02800CA1
59850+:102040003063007F00628821308400C02402004099
59851+:10205000148200320000A8218E3500388E2200182C
59852+:102060001440000224020001AE2200189202003C3B
59853+:10207000304200201440000E8F83001C000511C068
59854+:102080002442024000621821306400783C02008043
59855+:102090000082202500741824AE630800AE64081086
59856+:1020A0008E2200188E03000800431021AE22001873
59857+:1020B0008E22002C8E230018244200010062182B6F
59858+:1020C0001060004300000000924200002442000122
59859+:1020D000A24200003C0308008C6300F4304200FF81
59860+:1020E00050430001A2400000924200008F84001C77
59861+:1020F000000211C024420240248300403063007F6C
59862+:10210000008220213C02800A0094202400621821D1
59863+:10211000AE6400240A0008D2AEC30000920300326D
59864+:102120002402FFC000431024304200FF1440000589
59865+:1021300024020001AE220018962200340A00084250
59866+:102140003055FFFF8E22001424420001AE220018F9
59867+:102150009202003000021600000216030441001C27
59868+:10216000000000009602003227A400100080282101
59869+:10217000A7A20016960200320000302124070001B9
59870+:102180003042FFFFAF8200140E000706AFA0001C14
59871+:10219000960200328F83001C3C0408008C8400E807
59872+:1021A00030423FFF000211800064182100621821B4
59873+:1021B00000741024AE62002C3063007F3C02800E5D
59874+:1021C000006218219062000D3042007FA062000D75
59875+:1021D0009222000D304200105040007892420000E0
59876+:1021E0003C028008344401009482004C8EC30000FD
59877+:1021F0003C130800967300C62442FFFFA482004CE3
59878+:10220000946200329623000E3054FFFF3070FFFFBF
59879+:102210003C0308008C6300D000701807A7A30038A7
59880+:102220009482003E3063FFFF3042FFFF14620007DC
59881+:10223000000000008C8200303C038000244200300B
59882+:10224000AC62003C0A00086A8C82002C9482004038
59883+:102250003042FFFF5462000927A400408C820038FE
59884+:102260003C03800024420030AC62003C8C8200348D
59885+:10227000AC6200380A0008793C03800027A50038CA
59886+:1022800027A60048026038210E00068FA7A000484C
59887+:102290008FA300403C02800024630030AC43003830
59888+:1022A0008FA30044AC43003C3C0380003C0200058B
59889+:1022B000AC6200303C028008344401009482004249
59890+:1022C000346304003042FFFF0202102B1440000769
59891+:1022D000AF8300289482004E9483004202021021B2
59892+:1022E000004310230A00088F3043FFFF9483004E01
59893+:1022F00094820042026318210050102300621823C8
59894+:102300003063FFFF3C028008344401009482003CAB
59895+:102310003042FFFF14430003000000000A00089F42
59896+:10232000240300019482003C3042FFFF0062102B26
59897+:10233000144000058F8200289482003C0062102324
59898+:102340003043FFFF8F820028AC550000AC400004F2
59899+:10235000AC540008AC43000C3C02000634420010B0
59900+:102360003C038000AC620030000000000000000070
59901+:10237000000000008C620000304200101040FFFDA1
59902+:102380003C04800834840100001018C20064182145
59903+:102390009065006432020007240600010046100424
59904+:1023A00000451025A0620064948300429622000E2E
59905+:1023B00050430001A386001892420000244200010D
59906+:1023C000A24200003C0308008C6300F4304200FF8E
59907+:1023D00050430001A2400000924200008F84001C84
59908+:1023E000000211C0244202402483004000822021C8
59909+:1023F0002402FF80008220243063007F3C02800A98
59910+:10240000006218213C028000AC440024AEC30000EE
59911+:102410008FBF006C8FB600688FB500648FB400600A
59912+:102420008FB3005C8FB200588FB100548FB0005052
59913+:1024300003E0000827BD007027BDFFD8AFB3001C24
59914+:10244000AFB20018AFB10014AFB00010AFBF0020A2
59915+:102450000080982100E0802130B1FFFF0E000D8444
59916+:1024600030D200FF0000000000000000000000006B
59917+:102470008F8200208F830024AC510000AC520004F6
59918+:10248000AC530008AC40000CAC400010AC40001451
59919+:10249000AC4000189463001E02038025AC50001C61
59920+:1024A0000000000000000000000000002404000103
59921+:1024B0008FBF00208FB3001C8FB200188FB10014A3
59922+:1024C0008FB000100A000DB827BD002830A5FFFF0F
59923+:1024D0000A0008DC30C600FF3C02800834430100DB
59924+:1024E0009462000E3C080800950800C63046FFFFC5
59925+:1024F00014C000043402FFFF946500EA0A000929B1
59926+:102500008F84001C10C20027000000009462004E5F
59927+:102510009464003C3045FFFF00A6102300A6182B52
59928+:102520003087FFFF106000043044FFFF00C5102318
59929+:1025300000E210233044FFFF0088102B1040000EF3
59930+:1025400000E810233C028008344401002403000109
59931+:1025500034420080A44300162402FFFFA482000E30
59932+:10256000948500EA8F84001C0000302130A5FFFF15
59933+:102570000A0009013C0760200044102A10400009AD
59934+:102580003C0280083443008094620016304200010F
59935+:10259000104000043C0280009442007E244200145B
59936+:1025A000A462001603E000080000000027BDFFE061
59937+:1025B0003C028008AFBF001CAFB0001834420100DD
59938+:1025C000944300429442004C104000193068FFFFD1
59939+:1025D0009383001824020001146200298FBF001C9D
59940+:1025E0003C06800834D00100000810C200501021C1
59941+:1025F000904200643103000734C70148304200FFB5
59942+:10260000006210073042000134C9014E34C4012C6D
59943+:1026100034C5013E1040001634C601420E0006D2F9
59944+:10262000AFA90010960200420A0009463048FFFF99
59945+:102630003C028008344401009483004494820042A8
59946+:102640001043000F8FBF001C94820044A4820042FC
59947+:1026500094820050A482004E8C820038AC820030FC
59948+:1026600094820040A482003E9482004AA4820048E2
59949+:102670008FBF001C8FB000180A00090427BD00207E
59950+:102680008FB0001803E0000827BD002027BDFFA081
59951+:10269000AFB1004C3C118000AFBF0058AFB3005445
59952+:1026A000AFB20050AFB000483626018890C2000398
59953+:1026B0003044007FA3A400108E32018090C200003D
59954+:1026C0003043007F240200031062003BAF92001CE5
59955+:1026D00028620004104000062402000424020002C4
59956+:1026E000106200098FBF00580A000B0F8FB300540F
59957+:1026F0001062004D240200051062014E8FBF005889
59958+:102700000A000B0F8FB30054000411C002421021C5
59959+:102710002404FF8024420240004410242643004049
59960+:10272000AE2200243063007F3C02800A0062182140
59961+:102730009062003CAFA3003C00441025A062003C26
59962+:102740008FA3003C9062003C304200401040016C7E
59963+:102750008FBF00583C108008A3800018361001007D
59964+:102760008E0200E08C63003427A4003C27A50010F3
59965+:10277000004310210E0007C3AE0200E093A2001038
59966+:102780003C038000A20200D58C6202780440FFFE68
59967+:102790008F82001CAC62024024020002A06202444C
59968+:1027A0003C021000AC6202780E0009390000000003
59969+:1027B0000A000B0E8FBF00583C05800890C3000133
59970+:1027C00090A2000B1443014E8FBF005834A4008028
59971+:1027D0008C8200189082004C90A200083C0260009D
59972+:1027E0008C4254048C8300183C027FFF3442FFFF6C
59973+:1027F000006218243C0208008C4200B4AC8300182C
59974+:102800003C038000244200013C010800AC2200B4DB
59975+:102810008C6201F80440FFFE8F82001CAC6201C094
59976+:102820000A000AD6240200023C10800890C300016E
59977+:102830009202000B144301328FBF005827A40018E6
59978+:1028400036050110240600033C0260008C4254044B
59979+:102850000E000E470000000027A40028360501F0F6
59980+:102860000E000E47240600038FA200283603010045
59981+:10287000AE0200648FA2002CAE0200688FA200306E
59982+:10288000AE02006C93A40018906300D52402FF8070
59983+:102890000082102400431025304900FF3084007F5F
59984+:1028A0003122007F0082102A544000013929008023
59985+:1028B000000411C0244202402403FF800242102180
59986+:1028C00000431024AE220094264200403042007F94
59987+:1028D0003C038006004340218FA3001C2402FFFF1D
59988+:1028E000AFA800403C130800927300F71062003359
59989+:1028F00093A2001995030014304400FF3063FFFFDA
59990+:102900000064182B106000100000000095040014F3
59991+:102910008D07001C8D0600183084FFFF0044202323
59992+:102920000004210000E438210000102100E4202BE5
59993+:1029300000C2302100C43021AD07001CAD060018D4
59994+:102940000A000A2F93A20019950400148D07001C99
59995+:102950008D0600183084FFFF008220230004210030
59996+:10296000000010210080182100C2302300E4202B39
59997+:1029700000C4302300E33823AD07001CAD06001867
59998+:1029800093A200198FA30040A462001497A2001A1A
59999+:10299000A46200168FA2001CAC6200108FA2001C63
60000+:1029A000AC62000C93A20019A462002097A2001A46
60001+:1029B000A46200228FA2001CAC6200243C048008A8
60002+:1029C000348300808C6200388FA20020012088218F
60003+:1029D000AC62003C8FA20020AC82000093A20018E1
60004+:1029E000A062004C93A20018A0820009A0600068B9
60005+:1029F00093A20018105100512407FF803229007F54
60006+:102A0000000911C024420240024210213046007FDA
60007+:102A10003C03800000471024AC6200943C02800616
60008+:102A200000C2302190C2003CAFA60040000020212F
60009+:102A300000471025A0C2003C8FA80040950200026C
60010+:102A4000950300148D07001C3042FFFF3063FFFF29
60011+:102A50008D060018004310230002110000E2382107
60012+:102A600000E2102B00C4302100C23021AD07001C51
60013+:102A7000AD06001895020002A5020014A50000167C
60014+:102A80008D020008AD0200108D020008AD02000C9E
60015+:102A900095020002A5020020A50000228D02000878
60016+:102AA000AD0200249102003C304200401040001A68
60017+:102AB000262200013C108008A3A90038A38000183A
60018+:102AC000361001008E0200E08D03003427A4004080
60019+:102AD00027A50038004310210E0007C3AE0200E016
60020+:102AE00093A200383C038000A20200D58C620278D9
60021+:102AF0000440FFFE8F82001CAC62024024020002F0
60022+:102B0000A06202443C021000AC6202780E00093957
60023+:102B100000000000262200013043007F14730004EF
60024+:102B2000004020212403FF8002231024004320269C
60025+:102B300093A200180A000A4B309100FF93A40018DA
60026+:102B40008FA3001C2402FFFF1062000A308900FFDF
60027+:102B500024820001248300013042007F14530005C9
60028+:102B6000306900FF2403FF800083102400431026F7
60029+:102B7000304900FF3C028008904200080120882173
60030+:102B8000305000FF123000193222007F000211C0C5
60031+:102B900002421021244202402403FF8000431824F3
60032+:102BA0003C048000AC8300943042007F3C038006EC
60033+:102BB000004310218C43000C004020211060000BCA
60034+:102BC000AFA200400E00057E000000002623000199
60035+:102BD0002405FF803062007F145300020225202468
60036+:102BE000008518260A000AAF307100FF3C048008F7
60037+:102BF000348400808C8300183C027FFF3442FFFF46
60038+:102C000000621824AC8300183C0380008C6201F839
60039+:102C10000440FFFE00000000AC7201C0240200026C
60040+:102C2000A06201C43C021000AC6201F80A000B0E65
60041+:102C30008FBF00583C04800890C300019082000BB5
60042+:102C40001443002F8FBF0058349000809202000878
60043+:102C500030420040104000200000000092020008B6
60044+:102C60000002160000021603044100050240202164
60045+:102C70000E000ECC240500930A000B0E8FBF0058E7
60046+:102C80009202000924030018304200FF1443000D93
60047+:102C900002402021240500390E000E64000030217E
60048+:102CA0000E0003328F84001C8F82FF9424030012D5
60049+:102CB000A04300090E00033D8F84001C0A000B0E88
60050+:102CC0008FBF0058240500360E000E64000030212E
60051+:102CD0000A000B0E8FBF00580E0003320240202165
60052+:102CE000920200058F84001C344200200E00033D38
60053+:102CF000A20200050E0010758F84001C8FBF0058C3
60054+:102D00008FB300548FB200508FB1004C8FB0004889
60055+:102D100003E0000827BD00603C0280083445010044
60056+:102D20003C0280008C42014094A3000E0000302140
60057+:102D300000402021AF82001C3063FFFF3402FFFF00
60058+:102D4000106200063C0760202402FFFFA4A2000ED0
60059+:102D500094A500EA0A00090130A5FFFF03E000087E
60060+:102D60000000000027BDFFC83C0280003C06800830
60061+:102D7000AFB5002CAFB1001CAFBF0030AFB400281E
60062+:102D8000AFB30024AFB20020AFB00018345101003F
60063+:102D900034C501008C4301008E2200148CA400E491
60064+:102DA0000000A821AF83001C0044102318400052EB
60065+:102DB000A38000188E22001400005021ACA200E471
60066+:102DC00090C3000890A200D53073007FA3A200102A
60067+:102DD0008CB200E08CB400E4304200FF1053003BA2
60068+:102DE00093A200108F83001C2407FF80000211C0F3
60069+:102DF0000062102124420240246300400047102456
60070+:102E00003063007F3C0980003C08800A006818217C
60071+:102E1000AD2200248C62003427A4001427A50010E2
60072+:102E2000024280210290102304400028AFA3001426
60073+:102E30009062003C00E21024304200FF1440001970
60074+:102E4000020090219062003C34420040A062003CAD
60075+:102E50008F86001C93A3001024C200403042007FE4
60076+:102E6000004828213C0208008C4200F42463000141
60077+:102E7000306400FF14820002A3A30010A3A000107E
60078+:102E800093A20010AFA50014000211C0244202401A
60079+:102E900000C2102100471024AD2200240A000B4577
60080+:102EA00093A200100E0007C3000000003C0280083F
60081+:102EB00034420100AC5000E093A30010240A00014A
60082+:102EC000A04300D50A000B4593A200102402000184
60083+:102ED000154200093C0380008C6202780440FFFE2A
60084+:102EE0008F82001CAC62024024020002A0620244F5
60085+:102EF0003C021000AC6202789222000B2403000214
60086+:102F0000304200FF144300720000000096220008C7
60087+:102F1000304300FF24020082146200402402008437
60088+:102F20003C028000344901008D22000C95230006EC
60089+:102F3000000216023063FFFF3045003F24020027E5
60090+:102F400010A2000FAF83001428A200281040000830
60091+:102F5000240200312402002110A2000924020025CD
60092+:102F600010A20007938200190A000BBD00000000A8
60093+:102F700010A20007938200190A000BBD0000000098
60094+:102F80000E000777012020210A000C3D0000000000
60095+:102F90003C0380008C6202780440FFFE8F82001C9C
60096+:102FA000AC62024024020002A06202443C02100013
60097+:102FB000AC6202780A000C3D000000009523000678
60098+:102FC000912400058D25000C8D2600108D270018FA
60099+:102FD0008D28001C8D290020244200013C0108009E
60100+:102FE000A42356C63C010800A02456C53C01080095
60101+:102FF000AC2556CC3C010800AC2656D03C0108005C
60102+:10300000AC2756D83C010800AC2856DC3C0108002F
60103+:10301000AC2956E00A000C3DA38200191462000A94
60104+:10302000240200813C02800834420100944500EAF9
60105+:10303000922600058F84001C30A5FFFF30C600FFDC
60106+:103040000A000BFE3C0760211462005C00000000D7
60107+:103050009222000A304300FF306200201040000737
60108+:10306000306200403C02800834420100944500EA8E
60109+:103070008F84001C0A000BFC24060040104000074F
60110+:10308000000316003C02800834420100944500EA27
60111+:103090008F84001C0A000BFC24060041000216036A
60112+:1030A000044100463C02800834420100944500EA95
60113+:1030B0008F84001C2406004230A5FFFF3C076019E6
60114+:1030C0000E000901000000000A000C3D0000000095
60115+:1030D0009222000B24040016304200FF1044000628
60116+:1030E0003C0680009222000B24030017304200FFB0
60117+:1030F000144300320000000034C5010090A2000B10
60118+:10310000304200FF1444000B000080218CA20020FC
60119+:103110008CA400202403FF800043102400021140EF
60120+:103120003084007F004410253C032000004310251C
60121+:10313000ACC2083094A2000800021400000214037C
60122+:10314000044200012410000194A2000830420080D3
60123+:103150005040001A0200A82194A20008304220002A
60124+:10316000504000160200A8218CA300183C021C2D20
60125+:10317000344219ED106200110200A8213C0208003F
60126+:103180008C4200D4104000053C0280082403000457
60127+:1031900034420100A04300FC3C028008344201009C
60128+:1031A000944500EA8F84001C2406000630A5FFFF2A
60129+:1031B0000E0009013C0760210200A8210E00093918
60130+:1031C000000000009222000A304200081040000473
60131+:1031D00002A010210E0013790000000002A01021AF
60132+:1031E0008FBF00308FB5002C8FB400288FB3002420
60133+:1031F0008FB200208FB1001C8FB0001803E00008D0
60134+:1032000027BD00382402FF80008220243C02900069
60135+:1032100034420007008220253C028000AC4400209C
60136+:103220003C0380008C6200200440FFFE0000000090
60137+:1032300003E00008000000003C0380002402FF803F
60138+:10324000008220243462000700822025AC64002024
60139+:103250008C6200200440FFFE0000000003E0000834
60140+:103260000000000027BDFFD8AFB3001CAFB10014B1
60141+:10327000AFB00010AFBF0020AFB200183C1180000B
60142+:103280003C0280088E32002034530100AE2400201E
60143+:10329000966300EA000514003C074000004738250B
60144+:1032A00000A08021000030210E0009013065FFFFE1
60145+:1032B000240200A1160200022402FFFFA2620009FC
60146+:1032C000AE3200208FBF00208FB3001C8FB20018D9
60147+:1032D0008FB100148FB0001003E0000827BD002854
60148+:1032E0003C0280082403000527BDFFE834420100AA
60149+:1032F000A04300FCAFBF00103C0280008C420100E4
60150+:10330000240500A1004020210E000C67AF82001CA4
60151+:103310003C0380008C6202780440FFFE8F82001C18
60152+:103320008FBF001027BD0018AC62024024020002CB
60153+:10333000A06202443C021000AC62027803E0000884
60154+:103340000000000027BDFFE83C068000AFBF001072
60155+:1033500034C7010094E20008304400FF3883008243
60156+:10336000388200842C6300012C4200010062182581
60157+:103370001060002D24020083938200195040003B0E
60158+:103380008FBF00103C020800904256CC8CC4010054
60159+:103390003C06080094C656C63045003F38A30032AC
60160+:1033A00038A2003F2C6300012C4200010062182566
60161+:1033B000AF84001CAF860014A380001914600007BE
60162+:1033C00000E020212402002014A2001200000000CE
60163+:1033D0003402FFFF14C2000F00000000240200208E
60164+:1033E00014A2000500E028218CE300142402FFFF52
60165+:1033F0005062000B8FBF00103C040800248456C0AC
60166+:10340000000030210E000706240700010A000CD638
60167+:103410008FBF00100E000777000000008FBF001064
60168+:103420000A00093927BD001814820004240200850F
60169+:103430008CC501040A000CE1000020211482000662
60170+:103440002482FF808CC50104240440008FBF00103B
60171+:103450000A00016727BD0018304200FF2C4200021D
60172+:1034600010400004240200228FBF00100A000B2726
60173+:1034700027BD0018148200048F8200248FBF001023
60174+:103480000A000C8627BD00188C42000C1040001E5C
60175+:1034900000E0282190E300092402001814620003D0
60176+:1034A000240200160A000CFC240300081462000722
60177+:1034B00024020017240300123C02800834420080DA
60178+:1034C000A04300090A000D0994A7000854620007F0
60179+:1034D00094A700088F82FF942404FFFE9043000508
60180+:1034E00000641824A043000594A7000890A6001BC0
60181+:1034F0008CA4000094A500068FBF001000073C00BC
60182+:103500000A0008DC27BD00188FBF001003E0000888
60183+:1035100027BD00188F8500243C04800094A2002A57
60184+:103520008CA30034000230C02402FFF000C210243B
60185+:1035300000621821AC83003C8CA200303C03800068
60186+:10354000AC8200383C02005034420010AC620030C3
60187+:103550000000000000000000000000008C6200007D
60188+:10356000304200201040FFFD30C20008104000062D
60189+:103570003C0280008C620408ACA200208C62040C27
60190+:103580000A000D34ACA200248C430400ACA300203C
60191+:103590008C420404ACA200243C0300203C028000C6
60192+:1035A000AC4300303C0480008C8200300043102487
60193+:1035B0001440FFFD8F8600243C020040AC820030A6
60194+:1035C00094C3002A94C2002894C4002C94C5002EF1
60195+:1035D00024630001004410213064FFFFA4C20028CE
60196+:1035E00014850002A4C3002AA4C0002A03E0000836
60197+:1035F000000000008F84002427BDFFE83C05800404
60198+:1036000024840010AFBF00100E000E472406000AED
60199+:103610008F840024948200129483002E3042000F85
60200+:10362000244200030043180424027FFF0043102BB0
60201+:1036300010400002AC8300000000000D0E000D13CE
60202+:10364000000000008F8300248FBF001027BD0018EA
60203+:10365000946200149463001A3042000F00021500B7
60204+:10366000006218253C02800003E00008AC4300A083
60205+:103670008F8300243C028004944400069462001A64
60206+:103680008C650000A4640016004410233042FFFF44
60207+:103690000045102B03E00008384200018F8400240D
60208+:1036A0003C0780049486001A8C85000094E2000692
60209+:1036B000A482001694E3000600C310233042FFFFEB
60210+:1036C0000045102B384200011440FFF8A483001677
60211+:1036D00003E00008000000008F8400243C02800406
60212+:1036E000944200069483001A8C850000A482001680
60213+:1036F000006210233042FFFF0045102B38420001CA
60214+:103700005040000D8F850024006030213C0780046C
60215+:1037100094E20006A482001694E3000600C310237E
60216+:103720003042FFFF0045102B384200011440FFF8E3
60217+:10373000A48300168F8500243C03800034620400BB
60218+:103740008CA40020AF820020AC6400388CA200243E
60219+:10375000AC62003C3C020005AC62003003E00008B3
60220+:10376000ACA000048F8400243C0300068C8200047B
60221+:1037700000021140004310253C038000AC62003081
60222+:103780000000000000000000000000008C6200004B
60223+:10379000304200101040FFFD34620400AC80000491
60224+:1037A00003E00008AF8200208F86002427BDFFE0E1
60225+:1037B000AFB10014AFB00010AFBF00188CC300044D
60226+:1037C0008CC500248F820020309000FF94C4001A22
60227+:1037D00024630001244200202484000124A7002047
60228+:1037E000ACC30004AF820020A4C4001AACC70024FC
60229+:1037F00004A100060000882104E2000594C2001A1A
60230+:103800008CC2002024420001ACC2002094C2001AE5
60231+:1038100094C300282E040001004310262C4200010E
60232+:10382000004410245040000594C2001A24020001F4
60233+:10383000ACC2000894C2001A94C300280010202BC8
60234+:10384000004310262C4200010044102514400007BC
60235+:10385000000000008CC20008144000042402001084
60236+:103860008CC300041462000F8F8500240E000DA786
60237+:10387000241100018F820024944300289442001AEE
60238+:1038800014430003000000000E000D1300000000B0
60239+:10389000160000048F8500240E000D840000000037
60240+:1038A0008F85002494A2001E94A4001C24420001D1
60241+:1038B0003043FFFF14640002A4A2001EA4A0001E57
60242+:1038C0001200000A3C02800494A2001494A3001A7F
60243+:1038D0003042000F00021500006218253C028000F3
60244+:1038E000AC4300A00A000E1EACA0000894420006E3
60245+:1038F00094A3001A8CA40000A4A200160062102356
60246+:103900003042FFFF0044102B384200011040000DF0
60247+:1039100002201021006030213C07800494E2000660
60248+:10392000A4A2001694E3000600C310233042FFFF58
60249+:103930000044102B384200011440FFF8A4A30016E5
60250+:10394000022010218FBF00188FB100148FB000101B
60251+:1039500003E0000827BD002003E00008000000008D
60252+:103960008F82002C3C03000600021140004310250A
60253+:103970003C038000AC62003000000000000000004A
60254+:10398000000000008C620000304200101040FFFD7B
60255+:1039900034620400AF82002803E00008AF80002CEE
60256+:1039A00003E000080000102103E000080000000010
60257+:1039B0003084FFFF30A5FFFF0000182110800007B2
60258+:1039C000000000003082000110400002000420428C
60259+:1039D000006518210A000E3D0005284003E000089C
60260+:1039E0000060102110C0000624C6FFFF8CA200005A
60261+:1039F00024A50004AC8200000A000E4724840004C1
60262+:103A000003E000080000000010A0000824A3FFFF4E
60263+:103A1000AC86000000000000000000002402FFFF50
60264+:103A20002463FFFF1462FFFA2484000403E000080B
60265+:103A3000000000003C0280083442008024030001A2
60266+:103A4000AC43000CA4430010A4430012A443001490
60267+:103A500003E00008A44300168F82002427BDFFD88E
60268+:103A6000AFB3001CAFB20018AFB10014AFB000107C
60269+:103A7000AFBF00208C47000C248200802409FF8007
60270+:103A80003C08800E3043007F008080213C0A80008B
60271+:103A9000004920240068182130B100FF30D200FF17
60272+:103AA00010E000290000982126020100AD44002CFE
60273+:103AB000004928243042007F004820219062000005
60274+:103AC00024030050304200FF1443000400000000B3
60275+:103AD000AD45002C948200EA3053FFFF0E000D84A8
60276+:103AE000000000008F8200248F83002000112C0032
60277+:103AF0009442001E001224003484000100A22825F4
60278+:103B00003C02400000A22825AC7000008FBF0020BE
60279+:103B1000AC6000048FB20018AC7300088FB10014C1
60280+:103B2000AC60000C8FB3001CAC6400108FB00010B0
60281+:103B3000AC60001424040001AC60001827BD00280C
60282+:103B40000A000DB8AC65001C8FBF00208FB3001CAD
60283+:103B50008FB200188FB100148FB0001003E000087E
60284+:103B600027BD00283C06800034C201009043000FAE
60285+:103B7000240200101062000E2865001110A000073A
60286+:103B800024020012240200082405003A10620006F4
60287+:103B90000000302103E0000800000000240500358B
60288+:103BA0001462FFFC000030210A000E6400000000D7
60289+:103BB0008CC200748F83FF9424420FA003E000089E
60290+:103BC000AC62000C27BDFFE8AFBF00100E0003423F
60291+:103BD000240500013C0480088FBF0010240200016E
60292+:103BE00034830080A462001227BD00182402000163
60293+:103BF00003E00008A080001A27BDFFE0AFB2001864
60294+:103C0000AFB10014AFB00010AFBF001C30B2FFFF67
60295+:103C10000E000332008088213C028008345000806E
60296+:103C20009202000924030004304200FF1443000CF8
60297+:103C30003C028008124000082402000A0E000E5BBD
60298+:103C400000000000920200052403FFFE0043102440
60299+:103C5000A202000524020012A20200093C02800810
60300+:103C600034420080022020210E00033DA0400027A6
60301+:103C700016400003022020210E000EBF00000000AD
60302+:103C800002202021324600FF8FBF001C8FB2001897
60303+:103C90008FB100148FB00010240500380A000E64A4
60304+:103CA00027BD002027BDFFE0AFBF001CAFB200184A
60305+:103CB000AFB10014AFB000100E00033200808021BD
60306+:103CC0000E000E5B000000003C02800834450080BE
60307+:103CD00090A2000924120018305100FF1232000394
60308+:103CE0000200202124020012A0A2000990A20005D7
60309+:103CF0002403FFFE004310240E00033DA0A2000594
60310+:103D00000200202124050020163200070000302187
60311+:103D10008FBF001C8FB200188FB100148FB000103D
60312+:103D20000A00034227BD00208FBF001C8FB200187D
60313+:103D30008FB100148FB00010240500390A000E6402
60314+:103D400027BD002027BDFFE83C028000AFB0001077
60315+:103D5000AFBF0014344201009442000C2405003629
60316+:103D60000080802114400012304600FF0E00033214
60317+:103D7000000000003C02800834420080240300124E
60318+:103D8000A043000990430005346300100E000E5B51
60319+:103D9000A04300050E00033D020020210200202167
60320+:103DA0000E000342240500200A000F3C0000000022
60321+:103DB0000E000E64000000000E00033202002021FD
60322+:103DC0003C0280089043001B2405FF9F0200202135
60323+:103DD000006518248FBF00148FB00010A043001B93
60324+:103DE0000A00033D27BD001827BDFFE0AFBF001844
60325+:103DF000AFB10014AFB0001030B100FF0E000332BD
60326+:103E0000008080213C02800824030012344200809C
60327+:103E10000E000E5BA04300090E00033D02002021AE
60328+:103E200002002021022030218FBF00188FB1001422
60329+:103E30008FB00010240500350A000E6427BD002055
60330+:103E40003C0480089083000E9082000A1443000B0B
60331+:103E5000000028218F82FF942403005024050001D4
60332+:103E600090420000304200FF1443000400000000B4
60333+:103E70009082000E24420001A082000E03E00008A0
60334+:103E800000A010213C0380008C6201F80440FFFE7A
60335+:103E900024020002AC6401C0A06201C43C02100014
60336+:103EA00003E00008AC6201F827BDFFE0AFB20018E4
60337+:103EB0003C128008AFB10014AFBF001CAFB00010BF
60338+:103EC00036510080922200092403000A304200FF8C
60339+:103ED0001443003E000000008E4300048E22003890
60340+:103EE000506200808FBF001C92220000240300500B
60341+:103EF000304200FF144300253C0280008C42014008
60342+:103F00008E4300043642010002202821AC43001CED
60343+:103F10009622005C8E2300383042FFFF00021040E2
60344+:103F200000621821AE23001C8E4300048E2400384A
60345+:103F30009622005C006418233042FFFF0003184300
60346+:103F4000000210400043102A10400006000000004C
60347+:103F50008E4200048E230038004310230A000FAA6B
60348+:103F6000000220439622005C3042FFFF0002204006
60349+:103F70003C0280083443010034420080ACA4002C91
60350+:103F8000A040002424020001A062000C0E000F5E7D
60351+:103F900000000000104000538FBF001C3C02800056
60352+:103FA0008C4401403C0380008C6201F80440FFFE19
60353+:103FB00024020002AC6401C0A06201C43C021000F3
60354+:103FC000AC6201F80A0010078FBF001C92220009A2
60355+:103FD00024030010304200FF144300043C02800020
60356+:103FE0008C4401400A000FEE0000282192220009B3
60357+:103FF00024030016304200FF14430006240200147C
60358+:10400000A22200093C0280008C4401400A001001F9
60359+:104010008FBF001C8E2200388E23003C00431023EB
60360+:10402000044100308FBF001C92220027244200016F
60361+:10403000A2220027922200272C42000414400016DE
60362+:104040003C1080009222000924030004304200FF4B
60363+:10405000144300093C0280008C4401408FBF001CC7
60364+:104060008FB200188FB100148FB000102405009398
60365+:104070000A000ECC27BD00208C440140240500938B
60366+:104080008FBF001C8FB200188FB100148FB00010CA
60367+:104090000A000F4827BD00208E0401400E000332A5
60368+:1040A000000000008E4200042442FFFFAE420004E4
60369+:1040B0008E22003C2442FFFFAE22003C0E00033D56
60370+:1040C0008E0401408E0401408FBF001C8FB2001887
60371+:1040D0008FB100148FB00010240500040A000342C1
60372+:1040E00027BD00208FB200188FB100148FB00010D0
60373+:1040F00003E0000827BD00203C0680008CC2018838
60374+:104100003C038008346500809063000E00021402B6
60375+:10411000304400FF306300FF1464000E3C0280084E
60376+:1041200090A20026304200FF104400098F82FF94C5
60377+:10413000A0A400262403005090420000304200FF5B
60378+:1041400014430006000000000A0005A18CC4018091
60379+:104150003C02800834420080A044002603E00008AE
60380+:104160000000000027BDFFE030E700FFAFB20018FD
60381+:10417000AFBF001CAFB10014AFB0001000809021A1
60382+:1041800014E0000630C600FF000000000000000D33
60383+:10419000000000000A001060240001163C038008A3
60384+:1041A0009062000E304200FF14460023346200800B
60385+:1041B00090420026304200FF1446001F000000001D
60386+:1041C0009062000F304200FF1446001B0000000008
60387+:1041D0009062000A304200FF144600038F90FF9463
60388+:1041E0000000000D8F90FF948F82FF983C1180009B
60389+:1041F000AE05003CAC450000A066000A0E0003328C
60390+:104200008E240100A20000240E00033D8E24010034
60391+:104210003C0380008C6201F80440FFFE240200028F
60392+:10422000AC7201C0A06201C43C021000AC6201F893
60393+:104230000A0010618FBF001C000000000000000D8C
60394+:10424000000000002400013F8FBF001C8FB2001847
60395+:104250008FB100148FB0001003E0000827BD0020CC
60396+:104260008F83FF943C0280008C44010034420100A3
60397+:104270008C65003C9046001B0A00102724070001B3
60398+:104280003C0280089043000E9042000A0043102632
60399+:10429000304200FF03E000080002102B27BDFFE0C2
60400+:1042A0003C028008AFB10014AFB00010AFBF0018DF
60401+:1042B0003450008092020005240300303042003068
60402+:1042C00014430085008088218F8200248C42000CDA
60403+:1042D000104000828FBF00180E000D840000000007
60404+:1042E0008F860020ACD100009202000892030009E2
60405+:1042F000304200FF00021200306300FF004310252F
60406+:10430000ACC200049202004D000216000002160327
60407+:1043100004410005000000003C0308008C630048D5
60408+:104320000A00109F3C1080089202000830420040B2
60409+:10433000144000030000182192020027304300FFC0
60410+:104340003C108008361100809222004D00031E00B0
60411+:10435000304200FF0002140000621825ACC30008C0
60412+:104360008E2400308F820024ACC4000C8E250034D3
60413+:104370009443001E3C02C00BACC50010006218251F
60414+:104380008E22003800002021ACC200148E22003C96
60415+:10439000ACC200180E000DB8ACC3001C8E020004A5
60416+:1043A0008F8400203C058000AC8200008E2200201B
60417+:1043B000AC8200048E22001CAC8200088E220058C1
60418+:1043C0008CA3007400431021AC82000C8E22002CC0
60419+:1043D000AC8200108E2200408E23004400021400A4
60420+:1043E00000431025AC8200149222004D240300806B
60421+:1043F000304200FF1443000400000000AC800018AD
60422+:104400000A0010E38F8200248E23000C2402000196
60423+:104410001062000E2402FFFF92220008304200408A
60424+:104420001440000A2402FFFF8E23000C8CA20074AB
60425+:10443000006218233C0208000062102414400002AD
60426+:10444000000028210060282100051043AC820018DC
60427+:104450008F820024000020219443001E3C02C00CE7
60428+:10446000006218258F8200200E000DB8AC43001C9E
60429+:104470003C038008346201008C4200008F850020DC
60430+:10448000346300808FBF0018ACA20000ACA0000411
60431+:104490008C6400488F8200248FB10014ACA4000803
60432+:1044A000ACA0000CACA00010906300059446001E68
60433+:1044B0003C02400D00031E0000C23025ACA30014D6
60434+:1044C0008FB00010ACA0001824040001ACA6001CA2
60435+:1044D0000A000DB827BD00208FBF00188FB100144F
60436+:1044E0008FB0001003E0000827BD00203C028000D0
60437+:1044F0009443007C3C02800834460100308400FF75
60438+:104500003065FFFF2402000524A34650A0C4000C20
60439+:104510005482000C3065FFFF90C2000D2C42000752
60440+:104520001040000724A30A0090C3000D24020014C9
60441+:104530000062100400A210210A00111F3045FFFF85
60442+:104540003065FFFF3C0280083442008003E0000831
60443+:10455000A44500143C03800834680080AD05003891
60444+:10456000346701008CE2001C308400FF00A210239D
60445+:104570001840000330C600FF24A2FFFCACE2001C80
60446+:1045800030820001504000083C0380088D02003C4E
60447+:1045900000A2102304410012240400058C620004D0
60448+:1045A00010A2000F3C0380088C62000414A2001EBD
60449+:1045B000000000003C0208008C4200D8304200207D
60450+:1045C000104000093C0280083462008090630008BB
60451+:1045D0009042004C144300043C0280082404000470
60452+:1045E0000A00110900000000344300803442010039
60453+:1045F000A040000C24020001A462001410C0000AB4
60454+:104600003C0280008C4401003C0380008C6201F875
60455+:104610000440FFFE24020002AC6401C0A06201C499
60456+:104620003C021000AC6201F803E00008000000004A
60457+:1046300027BDFFE800A61823AFBF00101860008058
60458+:10464000308800FF3C02800834470080A0E000244E
60459+:1046500034440100A0E000278C82001C00A210233B
60460+:1046600004400056000000008CE2003C94E3005C33
60461+:104670008CE4002C004530233063FFFF00C3182179
60462+:104680000083202B1080000400E018218CE2002C15
60463+:104690000A00117800A2102194E2005C3042FFFF72
60464+:1046A00000C2102100A21021AC62001C3C02800854
60465+:1046B000344400809482005C8C83001C3042FFFFF5
60466+:1046C0000002104000A210210043102B10400004F3
60467+:1046D000000000008C82001C0A00118B3C06800840
60468+:1046E0009482005C3042FFFF0002104000A21021C3
60469+:1046F0003C06800834C3010034C70080AC82001C33
60470+:10470000A060000CACE500388C62001C00A21023F5
60471+:104710001840000224A2FFFCAC62001C3102000120
60472+:10472000104000083C0380088CE2003C00A21023EB
60473+:1047300004410012240400058CC2000410A20010E1
60474+:104740008FBF00108C62000414A2004F8FBF0010B6
60475+:104750003C0208008C4200D8304200201040000A81
60476+:104760003C02800834620080906300089042004C54
60477+:10477000144300053C028008240400048FBF00108D
60478+:104780000A00110927BD001834430080344201009B
60479+:10479000A040000C24020001A46200143C0280002E
60480+:1047A0008C4401003C0380008C6201F80440FFFE51
60481+:1047B000240200020A0011D8000000008CE2001C54
60482+:1047C000004610230043102B54400001ACE5001CB0
60483+:1047D00094E2005C3042FFFF0062102B144000079F
60484+:1047E0002402000294E2005C8CE3001C3042FFFFD4
60485+:1047F00000621821ACE3001C24020002ACE5003882
60486+:104800000E000F5EA082000C1040001F8FBF001032
60487+:104810003C0280008C4401003C0380008C6201F863
60488+:104820000440FFFE24020002AC6401C0A06201C487
60489+:104830003C021000AC6201F80A0011F08FBF0010BA
60490+:1048400031020010104000108FBF00103C028008A1
60491+:10485000344500808CA3001C94A2005C00661823E1
60492+:104860003042FFFF006218213C023FFF3444FFFF4B
60493+:104870000083102B544000010080182100C3102138
60494+:10488000ACA2001C8FBF001003E0000827BD001879
60495+:1048900027BDFFE800C0402100A63023AFBF0010B5
60496+:1048A00018C00026308A00FF3C028008344900808E
60497+:1048B0008D24001C8D23002C008820230064182BDD
60498+:1048C0001060000F344701008CE2002000461021E8
60499+:1048D000ACE200208CE200200044102B1440000BBE
60500+:1048E0003C023FFF8CE2002000441023ACE2002099
60501+:1048F0009522005C3042FFFF0A0012100082202146
60502+:10490000ACE00020008620213C023FFF3443FFFF43
60503+:104910000064102B54400001006020213C028008FC
60504+:104920003442008000851821AC43001CA0400024C4
60505+:10493000A04000270A0012623C03800831420010A8
60506+:10494000104000433C0380083C06800834C40080CB
60507+:104950008C82003C004810235840003E34660080A2
60508+:104960009082002424420001A0820024908200242E
60509+:104970003C0308008C630024304200FF0043102BEE
60510+:10498000144000688FBF001034C201008C42001C2C
60511+:1049900000A2102318400063000000008CC3000434
60512+:1049A0009482005C006818233042FFFF0003184324
60513+:1049B000000210400043102A1040000500000000D3
60514+:1049C0008CC20004004810230A0012450002104364
60515+:1049D0009482005C3042FFFF000210403C068008D9
60516+:1049E000AC82002C34C5008094A2005C8CA4002C06
60517+:1049F00094A3005C3042FFFF00021040008220219F
60518+:104A00003063FFFF0083202101041021ACA2001CB1
60519+:104A10008CC2000434C60100ACC2001C2402000297
60520+:104A20000E000F5EA0C2000C1040003E8FBF0010B1
60521+:104A30003C0280008C4401003C0380008C6201F841
60522+:104A40000440FFFE240200020A001292000000004F
60523+:104A500034660080ACC50038346401008C82001CD0
60524+:104A600000A210231840000224A2FFFCAC82001C0C
60525+:104A7000314200015040000A3C0380088CC2003CD7
60526+:104A800000A2102304430014240400058C620004D7
60527+:104A900014A200033C0380080A00128424040005C9
60528+:104AA0008C62000414A2001F8FBF00103C0208009B
60529+:104AB0008C4200D8304200201040000A3C0280089E
60530+:104AC00034620080906300089042004C144300055B
60531+:104AD0003C028008240400048FBF00100A00110962
60532+:104AE00027BD00183443008034420100A040000C70
60533+:104AF00024020001A46200143C0280008C440100E6
60534+:104B00003C0380008C6201F80440FFFE2402000296
60535+:104B1000AC6401C0A06201C43C021000AC6201F8A8
60536+:104B20008FBF001003E0000827BD001827BDFFE875
60537+:104B30003C0A8008AFBF0010354900808D22003C40
60538+:104B400000C04021308400FF004610231840009D23
60539+:104B500030E700FF354701002402000100A63023A2
60540+:104B6000A0E0000CA0E0000DA522001418C0002455
60541+:104B7000308200108D23001C8D22002C0068182329
60542+:104B80000043102B1040000F000000008CE20020BA
60543+:104B900000461021ACE200208CE200200043102BE4
60544+:104BA0001440000B3C023FFF8CE200200043102326
60545+:104BB000ACE200209522005C3042FFFF0A0012C1E7
60546+:104BC00000621821ACE00020006618213C023FFF83
60547+:104BD0003446FFFF00C3102B5440000100C01821D1
60548+:104BE0003C0280083442008000651821AC43001C60
60549+:104BF000A0400024A04000270A00130F3C038008B7
60550+:104C0000104000403C0380088D22003C00481023E7
60551+:104C10005840003D34670080912200242442000166
60552+:104C2000A1220024912200243C0308008C6300246C
60553+:104C3000304200FF0043102B1440009A8FBF001039
60554+:104C40008CE2001C00A21023184000960000000017
60555+:104C50008D4300049522005C006818233042FFFF5A
60556+:104C600000031843000210400043102A10400005C2
60557+:104C7000012020218D420004004810230A0012F276
60558+:104C8000000210439522005C3042FFFF00021040FA
60559+:104C90003C068008AC82002C34C5008094A2005CE5
60560+:104CA0008CA4002C94A3005C3042FFFF0002104053
60561+:104CB000008220213063FFFF0083182101031021AF
60562+:104CC000ACA2001C8CC2000434C60100ACC2001CA3
60563+:104CD000240200020E000F5EA0C2000C1040007102
60564+:104CE0008FBF00103C0280008C4401003C03800018
60565+:104CF0008C6201F80440FFFE240200020A0013390E
60566+:104D00000000000034670080ACE500383466010024
60567+:104D10008CC2001C00A210231840000224A2FFFC39
60568+:104D2000ACC2001C30820001504000083C038008E7
60569+:104D30008CE2003C00A2102304430051240400052F
60570+:104D40008C62000410A2003E3C0380088C620004C8
60571+:104D500054A200548FBF00103C0208008C4200D8BF
60572+:104D600030420020104000063C028008346200807F
60573+:104D7000906300089042004C104300403C028008C1
60574+:104D80003443008034420100A040000C24020001A2
60575+:104D9000A46200143C0280008C4401003C038000AB
60576+:104DA0008C6201F80440FFFE24020002AC6401C0E2
60577+:104DB000A06201C43C021000AC6201F80A00137743
60578+:104DC0008FBF001024020005A120002714E2000A72
60579+:104DD0003C038008354301009062000D2C42000620
60580+:104DE000504000053C0380089062000D2442000101
60581+:104DF000A062000D3C03800834670080ACE50038F9
60582+:104E0000346601008CC2001C00A21023184000026E
60583+:104E100024A2FFFCACC2001C308200015040000AFA
60584+:104E20003C0380088CE2003C00A2102304410014E3
60585+:104E3000240400058C62000414A200033C038008D3
60586+:104E40000A00136E240400058C62000414A20015ED
60587+:104E50008FBF00103C0208008C4200D83042002076
60588+:104E60001040000A3C028008346200809063000811
60589+:104E70009042004C144300053C02800824040004C6
60590+:104E80008FBF00100A00110927BD001834430080AD
60591+:104E900034420100A040000C24020001A46200146E
60592+:104EA0008FBF001003E0000827BD00183C0B8008EE
60593+:104EB00027BDFFE83C028000AFBF00103442010074
60594+:104EC000356A00809044000A356901008C45001461
60595+:104ED0008D4800389123000C308400FF0105102319
60596+:104EE0001C4000B3306700FF2CE20006504000B1C8
60597+:104EF0008FBF00102402000100E2300430C2000322
60598+:104F00005440000800A8302330C2000C144000A117
60599+:104F100030C20030144000A38FBF00100A00143BC1
60600+:104F20000000000018C00024308200108D43001CD7
60601+:104F30008D42002C006818230043102B1040000FF6
60602+:104F4000000000008D22002000461021AD2200202C
60603+:104F50008D2200200043102B1440000B3C023FFF29
60604+:104F60008D22002000431023AD2200209542005CDA
60605+:104F70003042FFFF0A0013AF00621821AD2000206D
60606+:104F8000006618213C023FFF3446FFFF00C3102B90
60607+:104F90005440000100C018213C02800834420080C7
60608+:104FA00000651821AC43001CA0400024A04000274D
60609+:104FB0000A0013FD3C038008104000403C038008B9
60610+:104FC0008D42003C004810231840003D34670080AB
60611+:104FD0009142002424420001A14200249142002475
60612+:104FE0003C0308008C630024304200FF0043102B78
60613+:104FF000144000708FBF00108D22001C00A21023EF
60614+:105000001840006C000000008D6300049542005CB5
60615+:10501000006818233042FFFF0003184300021040CD
60616+:105020000043102A10400005014020218D62000439
60617+:10503000004810230A0013E0000210439542005C70
60618+:105040003042FFFF000210403C068008AC82002C7A
60619+:1050500034C5008094A2005C8CA4002C94A3005C56
60620+:105060003042FFFF00021040008220213063FFFF2A
60621+:105070000083182101031021ACA2001C8CC2000483
60622+:1050800034C60100ACC2001C240200020E000F5EF8
60623+:10509000A0C2000C104000478FBF00103C028000EF
60624+:1050A0008C4401003C0380008C6201F80440FFFE48
60625+:1050B000240200020A00142D000000003467008062
60626+:1050C000ACE50038346601008CC2001C00A210233D
60627+:1050D0001840000224A2FFFCACC2001C3082000178
60628+:1050E0005040000A3C0380088CE2003C00A21023E0
60629+:1050F00004430014240400058C62000414A200037D
60630+:105100003C0380080A00141F240400058C6200047C
60631+:1051100014A200288FBF00103C0208008C4200D867
60632+:10512000304200201040000A3C02800834620080B7
60633+:10513000906300089042004C144300053C02800834
60634+:10514000240400048FBF00100A00110927BD0018B5
60635+:105150003443008034420100A040000C24020001CE
60636+:10516000A46200143C0280008C4401003C038000D7
60637+:105170008C6201F80440FFFE24020002AC6401C00E
60638+:10518000A06201C43C021000AC6201F80A00143BAA
60639+:105190008FBF00108FBF0010010030210A00115A8C
60640+:1051A00027BD0018010030210A00129927BD001800
60641+:1051B0008FBF001003E0000827BD00183C038008E3
60642+:1051C0003464010024020003A082000C8C620004FD
60643+:1051D00003E00008AC82001C3C05800834A300807A
60644+:1051E0009062002734A501002406004324420001F8
60645+:1051F000A0620027906300273C0208008C42004810
60646+:10520000306300FF146200043C07602194A500EAAB
60647+:105210000A00090130A5FFFF03E0000800000000BC
60648+:1052200027BDFFE8AFBF00103C0280000E00144411
60649+:105230008C4401803C02800834430100A060000CD3
60650+:105240008C4200048FBF001027BD001803E0000847
60651+:10525000AC62001C27BDFFE03C028008AFBF001815
60652+:10526000AFB10014AFB000103445008034460100E7
60653+:105270003C0880008D09014090C3000C8CA4003CC8
60654+:105280008CA200381482003B306700FF9502007C3E
60655+:1052900090A30027146000093045FFFF2402000599
60656+:1052A00054E200083C04800890C2000D2442000132
60657+:1052B000A0C2000D0A00147F3C048008A0C0000DAD
60658+:1052C0003C048008348201009042000C2403000555
60659+:1052D000304200FF1443000A24A205DC348300801E
60660+:1052E000906200272C4200075040000524A20A00CB
60661+:1052F00090630027240200140062100400A2102111
60662+:105300003C108008361000803045FFFF012020212E
60663+:105310000E001444A60500149602005C8E030038AB
60664+:105320003C1180003042FFFF000210400062182153
60665+:10533000AE03001C0E0003328E24014092020025B1
60666+:1053400034420040A20200250E00033D8E2401409D
60667+:105350008E2401403C0380008C6201F80440FFFE73
60668+:1053600024020002AC6401C0A06201C43C0210002F
60669+:10537000AC6201F88FBF00188FB100148FB000101D
60670+:1053800003E0000827BD00203C0360103C02080039
60671+:1053900024420174AC62502C8C6250003C048000AA
60672+:1053A00034420080AC6250003C0208002442547C2D
60673+:1053B0003C010800AC2256003C020800244254384C
60674+:1053C0003C010800AC2256043C020002AC840008F8
60675+:1053D000AC82000C03E000082402000100A0302190
60676+:1053E0003C1C0800279C56083C0200023C050400B7
60677+:1053F00000852826008220260004102B2CA5000101
60678+:105400002C840001000210803C0308002463560035
60679+:105410000085202500431821108000030000102182
60680+:10542000AC6600002402000103E000080000000058
60681+:105430003C1C0800279C56083C0200023C05040066
60682+:1054400000852826008220260004102B2CA50001B0
60683+:105450002C840001000210803C03080024635600E5
60684+:105460000085202500431821108000050000102130
60685+:105470003C02080024425438AC62000024020001BF
60686+:1054800003E00008000000003C0200023C030400AE
60687+:1054900000821026008318262C4200012C63000194
60688+:1054A000004310251040000B000028213C1C080080
60689+:1054B000279C56083C0380008C62000824050001EC
60690+:1054C00000431025AC6200088C62000C00441025DB
60691+:1054D000AC62000C03E0000800A010213C1C080096
60692+:1054E000279C56083C0580008CA3000C0004202754
60693+:1054F000240200010064182403E00008ACA3000C9F
60694+:105500003C020002148200063C0560008CA208D018
60695+:105510002403FFFE0043102403E00008ACA208D0DF
60696+:105520003C02040014820005000000008CA208D098
60697+:105530002403FFFD00431024ACA208D003E00008C0
60698+:10554000000000003C02601A344200108C430080CE
60699+:1055500027BDFFF88C440084AFA3000093A3000094
60700+:10556000240200041462001AAFA4000493A20001F4
60701+:105570001040000797A300023062FFFC3C0380004C
60702+:10558000004310218C4200000A001536AFA200042F
60703+:105590003062FFFC3C03800000431021AC4400005B
60704+:1055A000A3A000003C0560008CA208D02403FFFEED
60705+:1055B0003C04601A00431024ACA208D08FA300045E
60706+:1055C0008FA2000034840010AC830084AC82008081
60707+:1055D00003E0000827BD000827BDFFE8AFBF0010AB
60708+:1055E0003C1C0800279C56083C0280008C43000CA1
60709+:1055F0008C420004004318243C0200021060001496
60710+:10560000006228243C0204003C04000210A00005B3
60711+:10561000006210243C0208008C4256000A00155B10
60712+:1056200000000000104000073C0404003C02080099
60713+:105630008C4256040040F809000000000A00156082
60714+:10564000000000000000000D3C1C0800279C5608CC
60715+:105650008FBF001003E0000827BD0018800802403B
60716+:1056600080080100800800808008000000000C8095
60717+:105670000000320008000E9808000EF408000F88A1
60718+:1056800008001028080010748008010080080080BD
60719+:10569000800800000A000028000000000000000050
60720+:1056A0000000000D6370362E322E316200000000C3
60721+:1056B00006020104000000000000000000000000DD
60722+:1056C000000000000000000038003C000000000066
60723+:1056D00000000000000000000000000000000020AA
60724+:1056E00000000000000000000000000000000000BA
60725+:1056F00000000000000000000000000000000000AA
60726+:10570000000000000000000021003800000000013F
60727+:105710000000002B000000000000000400030D400A
60728+:105720000000000000000000000000000000000079
60729+:105730000000000000000000100000030000000056
60730+:105740000000000D0000000D3C020800244259AC8E
60731+:105750003C03080024635BF4AC4000000043202BB2
60732+:105760001480FFFD244200043C1D080037BD9FFC4F
60733+:1057700003A0F0213C100800261000A03C1C0800EB
60734+:10578000279C59AC0E0002F6000000000000000D3E
60735+:1057900027BDFFB4AFA10000AFA20004AFA3000873
60736+:1057A000AFA4000CAFA50010AFA60014AFA700185F
60737+:1057B000AFA8001CAFA90020AFAA0024AFAB0028FF
60738+:1057C000AFAC002CAFAD0030AFAE0034AFAF00389F
60739+:1057D000AFB8003CAFB90040AFBC0044AFBF004819
60740+:1057E0000E000820000000008FBF00488FBC00445E
60741+:1057F0008FB900408FB8003C8FAF00388FAE0034B7
60742+:105800008FAD00308FAC002C8FAB00288FAA002406
60743+:105810008FA900208FA8001C8FA700188FA6001446
60744+:105820008FA500108FA4000C8FA300088FA2000486
60745+:105830008FA1000027BD004C3C1B60188F7A5030B0
60746+:10584000377B502803400008AF7A000000A01821E1
60747+:1058500000801021008028213C0460003C0760008B
60748+:105860002406000810600006348420788C42000072
60749+:10587000ACE220088C63000003E00008ACE3200CDD
60750+:105880000A000F8100000000240300403C02600079
60751+:1058900003E00008AC4320003C0760008F86000452
60752+:1058A0008CE520740086102100A2182B14600007DC
60753+:1058B000000028218F8AFDA024050001A1440013C7
60754+:1058C0008F89000401244021AF88000403E0000810
60755+:1058D00000A010218F84FDA08F8500049086001306
60756+:1058E00030C300FF00A31023AF82000403E00008D0
60757+:1058F000A08000138F84FDA027BDFFE8AFB000108B
60758+:10590000AFBF001490890011908700112402002875
60759+:10591000312800FF3906002830E300FF2485002CE1
60760+:105920002CD00001106200162484001C0E00006EB2
60761+:10593000000000008F8FFDA03C05600024020204DF
60762+:1059400095EE003E95ED003C000E5C0031ACFFFF93
60763+:10595000016C5025ACAA2010520000012402000462
60764+:10596000ACA22000000000000000000000000000C9
60765+:105970008FBF00148FB0001003E0000827BD00188F
60766+:105980000A0000A6000028218F85FDA027BDFFD8B2
60767+:10599000AFBF0020AFB3001CAFB20018AFB100140E
60768+:1059A000AFB000100080982190A4001124B0001C1A
60769+:1059B00024B1002C308300FF386200280E000090D4
60770+:1059C0002C5200010E00009800000000020020216F
60771+:1059D0001240000202202821000028210E00006E43
60772+:1059E000000000008F8DFDA03C0880003C05600099
60773+:1059F00095AC003E95AB003C02683025000C4C0095
60774+:105A0000316AFFFF012A3825ACA7201024020202C8
60775+:105A1000ACA6201452400001240200028FBF0020D7
60776+:105A20008FB3001C8FB200188FB100148FB000101C
60777+:105A300027BD002803E00008ACA2200027BDFFE03E
60778+:105A4000AFB20018AFB10014AFB00010AFBF001C70
60779+:105A50003C1160008E2320748F82000430D0FFFF41
60780+:105A600030F2FFFF1062000C2406008F0E00006E63
60781+:105A7000000000003C06801F0010440034C5FF00F9
60782+:105A80000112382524040002AE2720100000302126
60783+:105A9000AE252014AE2420008FBF001C8FB200184A
60784+:105AA0008FB100148FB0001000C0102103E0000877
60785+:105AB00027BD002027BDFFE0AFB0001030D0FFFFB2
60786+:105AC000AFBF0018AFB100140E00006E30F1FFFF41
60787+:105AD00000102400009180253C036000AC70201071
60788+:105AE0008FBF00188FB100148FB000102402000483
60789+:105AF000AC62200027BD002003E000080000102158
60790+:105B000027BDFFE03C046018AFBF0018AFB1001420
60791+:105B1000AFB000108C8850002403FF7F34028071E6
60792+:105B20000103382434E5380C241F00313C1980006F
60793+:105B3000AC8550003C11800AAC8253BCAF3F0008DA
60794+:105B40000E00054CAF9100400E00050A3C116000AC
60795+:105B50000E00007D000000008E3008083C0F570941
60796+:105B60002418FFF00218602435EEE00035EDF00057
60797+:105B7000018E5026018D58262D4600012D69000109
60798+:105B8000AF86004C0E000D09AF8900503C06601630
60799+:105B90008CC700003C0860148D0500A03C03FFFF8B
60800+:105BA00000E320243C02535300052FC2108200550D
60801+:105BB00034D07C00960201F2A780006C10400003F4
60802+:105BC000A780007C384B1E1EA78B006C960201F844
60803+:105BD000104000048F8D0050384C1E1EA78C007C96
60804+:105BE0008F8D005011A000058F83004C240E0020E3
60805+:105BF000A78E007CA78E006C8F83004C1060000580
60806+:105C00009785007C240F0020A78F007CA78F006C55
60807+:105C10009785007C2CB8008153000001240500808A
60808+:105C20009784006C2C91040152200001240404008C
60809+:105C30001060000B3C0260008FBF00188FB1001491
60810+:105C40008FB0001027BD0020A784006CA785007CC2
60811+:105C5000A380007EA780007403E00008A780009264
60812+:105C60008C4704382419103C30FFFFFF13F9000360
60813+:105C700030A8FFFF1100004624030050A380007EDF
60814+:105C80009386007E50C00024A785007CA780007CFE
60815+:105C90009798007CA780006CA7800074A780009272
60816+:105CA0003C010800AC3800800E00078700000000AF
60817+:105CB0003C0F60008DED0808240EFFF03C0B600ED9
60818+:105CC000260C0388356A00100000482100002821B6
60819+:105CD00001AE20243C105709AF8C0010AF8A004859
60820+:105CE000AF89001810900023AF8500148FBF0018F3
60821+:105CF0008FB100148FB0001027BD002003E0000812
60822+:105D0000AF80005400055080014648218D260004D4
60823+:105D10000A00014800D180219798007CA784006C7C
60824+:105D2000A7800074A78000923C010800AC38008076
60825+:105D30000E000787000000003C0F60008DED080892
60826+:105D4000240EFFF03C0B600E260C0388356A001011
60827+:105D5000000048210000282101AE20243C105709F2
60828+:105D6000AF8C0010AF8A0048AF8900181490FFDF95
60829+:105D7000AF85001424110001AF9100548FBF0018AB
60830+:105D80008FB100148FB0001003E0000827BD002081
60831+:105D90000A00017BA383007E3083FFFF8F880040D1
60832+:105DA0008F87003C000321403C0580003C020050EE
60833+:105DB000008248253C0660003C0A010034AC040027
60834+:105DC0008CCD08E001AA58241160000500000000F5
60835+:105DD0008CCF08E024E7000101EA7025ACCE08E092
60836+:105DE0008D19001001805821ACB900388D180014AD
60837+:105DF000ACB8003CACA9003000000000000000007E
60838+:105E00000000000000000000000000000000000092
60839+:105E100000000000000000003C0380008C640000D3
60840+:105E2000308200201040FFFD3C0F60008DED08E047
60841+:105E30003C0E010001AE18241460FFE100000000D8
60842+:105E4000AF87003C03E00008AF8B00588F8500400F
60843+:105E5000240BFFF03C06800094A7001A8CA90024B4
60844+:105E600030ECFFFF000C38C000EB5024012A402129
60845+:105E7000ACC8003C8CA400248CC3003C00831023DD
60846+:105E800018400033000000008CAD002025A2000166
60847+:105E90003C0F0050ACC2003835EE00103C068000CC
60848+:105EA000ACCE003000000000000000000000000048
60849+:105EB00000000000000000000000000000000000E2
60850+:105EC000000000003C0480008C9900003338002062
60851+:105ED0001300FFFD30E20008104000173C0980006D
60852+:105EE0008C880408ACA800108C83040CACA30014AC
60853+:105EF0003C1900203C188000AF19003094AE001807
60854+:105F000094AF001C01CF3021A4A6001894AD001A54
60855+:105F100025A70001A4A7001A94AB001A94AC001E98
60856+:105F2000118B00030000000003E0000800000000E7
60857+:105F300003E00008A4A0001A8D2A0400ACAA0010F7
60858+:105F40008D240404ACA400140A0002183C1900209B
60859+:105F50008CA200200A0002003C0F00500A0001EE53
60860+:105F60000000000027BDFFE8AFBF00100E000232A6
60861+:105F7000000000008F8900408FBF00103C038000AC
60862+:105F8000A520000A9528000A9527000427BD0018BF
60863+:105F90003105FFFF30E6000F0006150000A22025A6
60864+:105FA00003E00008AC6400803C0508008CA50020DC
60865+:105FB0008F83000C27BDFFE8AFB00010AFBF001407
60866+:105FC00010A300100000802124040001020430040A
60867+:105FD00000A6202400C3102450440006261000010F
60868+:105FE000001018802787FDA41480000A006718217C
60869+:105FF000261000012E0900025520FFF38F83000CAC
60870+:10600000AF85000C8FBF00148FB0001003E00008B4
60871+:1060100027BD00188C6800003C058000ACA8002457
60872+:106020000E000234261000013C0508008CA500205B
60873+:106030000A0002592E0900022405000100851804F7
60874+:106040003C0408008C84002027BDFFC8AFBF00348B
60875+:1060500000831024AFBE0030AFB7002CAFB60028CD
60876+:10606000AFB50024AFB40020AFB3001CAFB200182E
60877+:10607000AFB1001410400051AFB000108F84004049
60878+:10608000948700069488000A00E8302330D5FFFF8B
60879+:1060900012A0004B8FBF0034948B0018948C000A20
60880+:1060A000016C50233142FFFF02A2482B1520000251
60881+:1060B00002A02021004020212C8F000515E00002C5
60882+:1060C00000809821241300040E0001C102602021E9
60883+:1060D0008F87004002609021AF80004494F4000A52
60884+:1060E000026080211260004E3291FFFF3C1670006A
60885+:1060F0003C1440003C1E20003C1760008F99005863
60886+:106100008F380000031618241074004F0283F82BF8
60887+:1061100017E0003600000000107E00478F86004424
60888+:1061200014C0003A2403000102031023022320219B
60889+:106130003050FFFF1600FFF13091FFFF8F870040C6
60890+:106140003C1100203C108000AE11003094EB000A9E
60891+:106150003C178000024B5021A4EA000A94E9000A8F
60892+:1061600094E800043123FFFF3106000F00062D00E4
60893+:106170000065F025AEFE008094F3000A94F6001846
60894+:1061800012D30036001221408CFF00148CF4001052
60895+:1061900003E468210000C02101A4782B029870213B
60896+:1061A00001CF6021ACED0014ACEC001002B238233A
60897+:1061B00030F5FFFF16A0FFB88F8400408FBF00347A
60898+:1061C0008FBE00308FB7002C8FB600288FB500240B
60899+:1061D0008FB400208FB3001C8FB200188FB1001451
60900+:1061E0008FB0001003E0000827BD00381477FFCC03
60901+:1061F0008F8600440E000EE202002021004018218C
60902+:106200008F86004410C0FFC9020310230270702360
60903+:106210008F87004001C368210A0002E431B2FFFF0A
60904+:106220008F86004414C0FFC93C1100203C10800040
60905+:106230000A0002AEAE1100300E00046602002021FA
60906+:106240000A0002DB00401821020020210E0009395B
60907+:10625000022028210A0002DB004018210E0001EE76
60908+:10626000000000000A0002C702B2382327BDFFC8A1
60909+:10627000AFB7002CAFB60028AFB50024AFB40020F4
60910+:10628000AFB3001CAFB20018AFB10014AFB0001034
60911+:10629000AFBF00300E00011B241300013C047FFF40
60912+:1062A0003C0380083C0220003C010800AC20007048
60913+:1062B0003496FFFF34770080345200033C1512C03F
60914+:1062C000241400013C1080002411FF800E000245C0
60915+:1062D000000000008F8700488F8B00188F89001402
60916+:1062E0008CEA00EC8CE800E8014B302B01092823F4
60917+:1062F00000A6102314400006014B18231440000E82
60918+:106300003C05800002A3602B1180000B0000000000
60919+:106310003C0560008CEE00EC8CED00E88CA4180CC1
60920+:10632000AF8E001804800053AF8D00148F8F0010C3
60921+:10633000ADF400003C0580008CBF00003BF900017B
60922+:10634000333800011700FFE13C0380008C6201003C
60923+:1063500024060C0010460009000000008C680100B3
60924+:106360002D043080548000103C0480008C690100B2
60925+:106370002D2331811060000C3C0480008CAA0100A8
60926+:1063800011460004000020218CA6010024C5FF81D5
60927+:1063900030A400FF8E0B01000E000269AE0B00243A
60928+:1063A0000A00034F3C0480008C8D01002DAC3300AB
60929+:1063B00011800022000000003C0708008CE70098D4
60930+:1063C00024EE00013C010800AC2E00983C04800043
60931+:1063D0008C8201001440000300000000566000148D
60932+:1063E0003C0440008C9F01008C9801000000982123
60933+:1063F00003F1C82400193940330F007F00EF7025E6
60934+:1064000001D26825AC8D08308C8C01008C85010090
60935+:10641000258B0100017130240006514030A3007F1C
60936+:106420000143482501324025AC8808303C04400037
60937+:10643000AE0401380A00030E000000008C99010030
60938+:10644000240F0020AC99002092F80000330300FFD5
60939+:10645000106F000C241F0050547FFFDD3C048000AF
60940+:106460008C8401000E00154E000000000A00034F4E
60941+:106470003C04800000963824ACA7180C0A000327BF
60942+:106480008F8F00108C8501000E0008F72404008017
60943+:106490000A00034F3C04800000A4102B24030001D9
60944+:1064A00010400009000030210005284000A4102BF6
60945+:1064B00004A00003000318405440FFFC00052840DE
60946+:1064C0005060000A0004182B0085382B54E00004AB
60947+:1064D0000003184200C33025008520230003184222
60948+:1064E0001460FFF9000528420004182B03E000089F
60949+:1064F00000C310213084FFFF30C600FF3C0780003E
60950+:106500008CE201B80440FFFE00064C000124302557
60951+:106510003C08200000C820253C031000ACE00180AE
60952+:10652000ACE50184ACE4018803E00008ACE301B809
60953+:106530003C0660008CC5201C2402FFF03083020062
60954+:10654000308601001060000E00A2282434A500014E
60955+:106550003087300010E0000530830C0034A50004C3
60956+:106560003C04600003E00008AC85201C1060FFFDC7
60957+:106570003C04600034A5000803E00008AC85201C42
60958+:1065800054C0FFF334A500020A0003B03087300086
60959+:1065900027BDFFE8AFB00010AFBF00143C0760009C
60960+:1065A000240600021080001100A080218F83005873
60961+:1065B0000E0003A78C6400188F8200580000202171
60962+:1065C000240600018C45000C0E000398000000001A
60963+:1065D0001600000224020003000010218FBF0014E7
60964+:1065E0008FB0001003E0000827BD00188CE8201CC5
60965+:1065F0002409FFF001092824ACE5201C8F870058EE
60966+:106600000A0003CD8CE5000C3C02600E00804021A6
60967+:1066100034460100240900180000000000000000BA
60968+:10662000000000003C0A00503C0380003547020097
60969+:10663000AC68003834640400AC65003CAC670030E2
60970+:106640008C6C0000318B00201160FFFD2407FFFFE0
60971+:106650002403007F8C8D00002463FFFF248400044A
60972+:10666000ACCD00001467FFFB24C60004000000004E
60973+:10667000000000000000000024A402000085282B78
60974+:106680003C0300203C0E80002529FFFF010540212E
60975+:10669000ADC300301520FFE00080282103E0000892
60976+:1066A000000000008F82005827BDFFD8AFB3001C48
60977+:1066B000AFBF0020AFB20018AFB10014AFB00010F0
60978+:1066C00094460002008098218C5200182CC300814F
60979+:1066D0008C4800048C4700088C51000C8C49001039
60980+:1066E000106000078C4A00142CC4000414800013AE
60981+:1066F00030EB000730C5000310A0001000000000C0
60982+:106700002410008B02002021022028210E00039873
60983+:10671000240600031660000224020003000010217A
60984+:106720008FBF00208FB3001C8FB200188FB10014F0
60985+:106730008FB0001003E0000827BD00281560FFF1AE
60986+:106740002410008B3C0C80003C030020241F00011F
60987+:10675000AD830030AF9F0044000000000000000047
60988+:10676000000000002419FFF024D8000F031978243A
60989+:106770003C1000D0AD88003801F0702524CD000316
60990+:106780003C08600EAD87003C35850400AD8E0030BE
60991+:10679000000D38823504003C3C0380008C6B000007
60992+:1067A000316200201040FFFD0000000010E00008F2
60993+:1067B00024E3FFFF2407FFFF8CA800002463FFFFF2
60994+:1067C00024A50004AC8800001467FFFB24840004A7
60995+:1067D0003C05600EACA60038000000000000000080
60996+:1067E000000000008F8600543C0400203C0780001D
60997+:1067F000ACE4003054C000060120202102402021DA
60998+:106800000E0003A7000080210A00041D02002021C1
60999+:106810000E0003DD01402821024020210E0003A7C5
61000+:10682000000080210A00041D0200202127BDFFE096
61001+:10683000AFB200183092FFFFAFB10014AFBF001C21
61002+:10684000AFB000101640000D000088210A0004932C
61003+:106850000220102124050003508500278CE5000C40
61004+:106860000000000D262800013111FFFF24E2002066
61005+:106870000232802B12000019AF8200588F82004430
61006+:10688000144000168F8700583C0670003C0320001F
61007+:106890008CE5000000A62024148300108F84006083
61008+:1068A000000544023C09800000A980241480FFE90F
61009+:1068B000310600FF2CCA000B5140FFEB26280001D7
61010+:1068C000000668803C0E080025CE575801AE6021B6
61011+:1068D0008D8B0000016000080000000002201021E4
61012+:1068E0008FBF001C8FB200188FB100148FB0001042
61013+:1068F00003E0000827BD00200E0003982404008454
61014+:106900001600FFD88F8700580A000474AF8000601B
61015+:10691000020028210E0003BF240400018F870058C5
61016+:106920000A000474AF820060020028210E0003BF39
61017+:10693000000020210A0004A38F8700580E000404E1
61018+:10694000020020218F8700580A000474AF82006083
61019+:1069500030AFFFFF000F19C03C0480008C9001B8DD
61020+:106960000600FFFE3C1920043C181000AC83018097
61021+:10697000AC800184AC990188AC9801B80A00047518
61022+:106980002628000190E2000390E30002000020218D
61023+:106990000002FE0000033A0000FF2825240600083C
61024+:1069A0000E000398000000001600FFDC2402000324
61025+:1069B0008F870058000010210A000474AF82006025
61026+:1069C00090E8000200002021240600090A0004C308
61027+:1069D00000082E0090E4000C240900FF308500FF21
61028+:1069E00010A900150000302190F9000290F8000372
61029+:1069F000308F00FF94EB000400196E000018740043
61030+:106A0000000F62000186202501AE5025014B28258C
61031+:106A10003084FF8B0A0004C32406000A90E30002BE
61032+:106A200090FF0004000020210003360000DF28252D
61033+:106A30000A0004C32406000B0A0004D52406008BB8
61034+:106A4000000449C23127003F000443423C02800059
61035+:106A500000082040240316802CE60020AC43002CC4
61036+:106A600024EAFFE02482000114C0000330A900FFE3
61037+:106A700000801021314700FF000260803C0D800043
61038+:106A8000240A0001018D20213C0B000E00EA28049D
61039+:106A9000008B302111200005000538278CCE000026
61040+:106AA00001C5382503E00008ACC700008CD8000001
61041+:106AB0000307782403E00008ACCF000027BDFFE007
61042+:106AC000AFB10014AFB00010AFBF00183C076000BA
61043+:106AD0008CE408083402F0003C1160003083F000C0
61044+:106AE000240501C03C04800E000030211062000625
61045+:106AF000241000018CEA08083149F0003928E00030
61046+:106B00000008382B000780403C0D0200AE2D081411
61047+:106B1000240C16803C0B80008E2744000E000F8B47
61048+:106B2000AD6C002C120000043C02169124050001FB
61049+:106B3000120500103C023D2C345800E0AE384408E9
61050+:106B40003C1108008E31007C8FBF00183C066000AD
61051+:106B500000118540360F16808FB100148FB00010E1
61052+:106B60003C0E020027BD0020ACCF442003E000080B
61053+:106B7000ACCE08103C0218DA345800E0AE384408B5
61054+:106B80003C1108008E31007C8FBF00183C0660006D
61055+:106B900000118540360F16808FB100148FB00010A1
61056+:106BA0003C0E020027BD0020ACCF442003E00008CB
61057+:106BB000ACCE08100A0004EB240500010A0004EB27
61058+:106BC0000000282124020400A7820024A780001CC2
61059+:106BD000000020213C06080024C65A582405FFFF67
61060+:106BE00024890001000440803124FFFF01061821A0
61061+:106BF0002C87002014E0FFFAAC6500002404040098
61062+:106C0000A7840026A780001E000020213C06080063
61063+:106C100024C65AD82405FFFF248D0001000460809B
61064+:106C200031A4FFFF018658212C8A00201540FFFA6D
61065+:106C3000AD650000A7800028A7800020A780002263
61066+:106C4000000020213C06080024C65B582405FFFFF5
61067+:106C5000249900010004C0803324FFFF030678213B
61068+:106C60002C8E000415C0FFFAADE500003C05600065
61069+:106C70008CA73D002403E08F00E31024344601403C
61070+:106C800003E00008ACA63D002487007F000731C266
61071+:106C900024C5FFFF000518C2246400013082FFFFF5
61072+:106CA000000238C0A78400303C010800AC27003047
61073+:106CB000AF80002C0000282100002021000030219E
61074+:106CC0002489000100A728213124FFFF2CA81701E7
61075+:106CD000110000032C8300801460FFF924C600011A
61076+:106CE00000C02821AF86002C10C0001DA786002AF6
61077+:106CF00024CAFFFF000A11423C08080025085B581F
61078+:106D00001040000A00002021004030212407FFFF2E
61079+:106D1000248E00010004688031C4FFFF01A86021B7
61080+:106D20000086582B1560FFFAAD87000030A2001FC7
61081+:106D30005040000800043080240300010043C804D0
61082+:106D400000041080004878212738FFFF03E0000886
61083+:106D5000ADF8000000C820212405FFFFAC8500002D
61084+:106D600003E000080000000030A5FFFF30C6FFFF71
61085+:106D700030A8001F0080602130E700FF0005294295
61086+:106D80000000502110C0001D24090001240B000147
61087+:106D900025180001010B2004330800FF0126782686
61088+:106DA000390E00202DED00012DC2000101A2182591
61089+:106DB0001060000D014450250005C880032C4021BF
61090+:106DC0000100182110E0000F000A20278D040000A8
61091+:106DD000008A1825AD03000024AD00010000402109
61092+:106DE0000000502131A5FFFF252E000131C9FFFF12
61093+:106DF00000C9102B1040FFE72518000103E0000830
61094+:106E0000000000008D0A0000014440240A0005D162
61095+:106E1000AC68000027BDFFE830A5FFFF30C6FFFFCC
61096+:106E2000AFB00010AFBF001430E7FFFF00005021EB
61097+:106E30003410FFFF0000602124AF001F00C0482174
61098+:106E4000241800012419002005E0001601E010219B
61099+:106E50000002F943019F682A0009702B01AE40240B
61100+:106E600011000017000C18800064102110E00005CC
61101+:106E70008C4B000000F840040008382301675824B8
61102+:106E800000003821154000410000402155600016E7
61103+:106E90003169FFFF258B0001316CFFFF05E1FFEC3D
61104+:106EA00001E0102124A2003E0002F943019F682A5C
61105+:106EB0000009702B01AE40241500FFEB000C188078
61106+:106EC000154600053402FFFF020028210E0005B51B
61107+:106ED00000003821020010218FBF00148FB0001075
61108+:106EE00003E0000827BD00181520000301601821E9
61109+:106EF000000B1C0224080010306A00FF154000053A
61110+:106F0000306E000F250D000800031A0231A800FFA3
61111+:106F1000306E000F15C00005307F000325100004FF
61112+:106F200000031902320800FF307F000317E000055C
61113+:106F3000386900012502000200031882304800FF72
61114+:106F4000386900013123000110600004310300FFA3
61115+:106F5000250A0001314800FF310300FF000C6940A1
61116+:106F600001A34021240A000110CAFFD53110FFFF00
61117+:106F7000246E000131C800FF1119FFC638C9000195
61118+:106F80002D1F002053E0001C258B0001240D000163
61119+:106F90000A000648240E002051460017258B0001E8
61120+:106FA00025090001312800FF2D0900205120001281
61121+:106FB000258B000125430001010D5004014B1024D5
61122+:106FC000250900011440FFF4306AFFFF3127FFFF5D
61123+:106FD00010EE000C2582FFFF304CFFFF0000502117
61124+:106FE0003410FFFF312800FF2D0900205520FFF24B
61125+:106FF00025430001258B0001014648260A000602B0
61126+:10700000316CFFFF00003821000050210A000654B7
61127+:107010003410FFFF27BDFFD8AFB0001030F0FFFFE6
61128+:10702000AFB10014001039423211FFE000071080A8
61129+:10703000AFB3001C00B1282330D3FFFFAFB200185C
61130+:1070400030A5FFFF00809021026030210044202104
61131+:10705000AFBF00200E0005E03207001F022288218A
61132+:107060003403FFFF0240202102002821026030216A
61133+:1070700000003821104300093231FFFF02201021A7
61134+:107080008FBF00208FB3001C8FB200188FB1001487
61135+:107090008FB0001003E0000827BD00280E0005E0B7
61136+:1070A0000000000000408821022010218FBF002036
61137+:1070B0008FB3001C8FB200188FB100148FB0001076
61138+:1070C00003E0000827BD0028000424003C03600002
61139+:1070D000AC603D0810A00002348210063482101605
61140+:1070E00003E00008AC623D0427BDFFE0AFB0001034
61141+:1070F000309000FF2E020006AFBF001810400008BD
61142+:10710000AFB10014001030803C03080024635784A2
61143+:1071100000C328218CA400000080000800000000AB
61144+:10712000000020218FBF00188FB100148FB0001015
61145+:107130000080102103E0000827BD00209791002A5D
61146+:1071400016200051000020213C020800904200332C
61147+:107150000A0006BB00000000978D002615A0003134
61148+:10716000000020210A0006BB2402000897870024A3
61149+:1071700014E0001A00001821006020212402000100
61150+:107180001080FFE98FBF0018000429C2004530219C
61151+:1071900000A6582B1160FFE43C0880003C0720004B
61152+:1071A000000569C001A76025AD0C00203C038008E4
61153+:1071B0002402001F2442FFFFAC6000000441FFFDD9
61154+:1071C0002463000424A5000100A6702B15C0FFF560
61155+:1071D000000569C00A0006A58FBF00189787001C2C
61156+:1071E0003C04080024845A58240504000E0006605C
61157+:1071F00024060001978B002424440001308AFFFFFD
61158+:107200002569FFFF2D48040000402821150000409B
61159+:10721000A789002424AC3800000C19C00A0006B964
61160+:10722000A780001C9787001E3C04080024845AD8BD
61161+:10723000240504000E00066024060001979900262C
61162+:10724000244400013098FFFF272FFFFF2F0E04007A
61163+:107250000040882115C0002CA78F0026A780001EA3
61164+:107260003A020003262401003084FFFF0E00068D41
61165+:107270002C4500010011F8C027F00100001021C0CA
61166+:107280000A0006BB240200089785002E978700227B
61167+:107290003C04080024845B580E00066024060001AC
61168+:1072A0009787002A8F89002C2445000130A8FFFF12
61169+:1072B00024E3FFFF0109302B0040802114C0001897
61170+:1072C000A783002AA7800022978500300E000F7543
61171+:1072D00002002021244A05003144FFFF0E00068DE4
61172+:1072E000240500013C05080094A500320E000F752E
61173+:1072F00002002021244521003C0208009042003376
61174+:107300000A0006BB000521C00A0006F3A784001E80
61175+:1073100024AC3800000C19C00A0006B9A784001C70
61176+:107320000A00070DA7850022308400FF27BDFFE873
61177+:107330002C820006AFBF0014AFB000101040001543
61178+:1073400000A03821000440803C0308002463579CBF
61179+:10735000010328218CA40000008000080000000028
61180+:1073600024CC007F000751C2000C59C23170FFFFCE
61181+:107370002547C40030E5FFFF2784001C02003021B0
61182+:107380000E0005B52407000197860028020620217B
61183+:10739000A78400288FBF00148FB0001003E00008FE
61184+:1073A00027BD00183C0508008CA50030000779C2F5
61185+:1073B0000E00038125E4DF003045FFFF3C04080098
61186+:1073C00024845B58240600010E0005B52407000143
61187+:1073D000978E002A8FBF00148FB0001025CD0001BA
61188+:1073E00027BD001803E00008A78D002A0007C9C2C6
61189+:1073F0002738FF00001878C231F0FFFF3C04080076
61190+:1074000024845AD802002821240600010E0005B564
61191+:1074100024070001978D0026260E0100000E84002F
61192+:1074200025AC00013C0B6000A78C0026AD603D0838
61193+:1074300036040006000030213C0760008CE23D0469
61194+:10744000305F000617E0FFFD24C9000100061B00A5
61195+:10745000312600FF006440252CC50004ACE83D0443
61196+:1074600014A0FFF68FBF00148FB0001003E00008D7
61197+:1074700027BD0018000751C22549C8002406000195
61198+:10748000240700013C04080024845A580E0005B566
61199+:107490003125FFFF978700248FBF00148FB00010A5
61200+:1074A00024E6000127BD001803E00008A786002499
61201+:1074B0003C0660183C090800252900FCACC9502C8A
61202+:1074C0008CC850003C0580003C020002350700805B
61203+:1074D000ACC750003C04080024841FE03C030800B3
61204+:1074E00024631F98ACA50008ACA2000C3C01080066
61205+:1074F000AC2459A43C010800AC2359A803E00008BF
61206+:107500002402000100A030213C1C0800279C59AC3B
61207+:107510003C0C04003C0B0002008B3826008C4026FB
61208+:107520002CE200010007502B2D050001000A4880C5
61209+:107530003C030800246359A4004520250123182199
61210+:107540001080000300001021AC660000240200013E
61211+:1075500003E00008000000003C1C0800279C59AC18
61212+:107560003C0B04003C0A0002008A3026008B3826BF
61213+:107570002CC200010006482B2CE5000100094080C8
61214+:107580003C030800246359A4004520250103182169
61215+:1075900010800005000010213C0C0800258C1F986D
61216+:1075A000AC6C00002402000103E0000800000000B1
61217+:1075B0003C0900023C080400008830260089382677
61218+:1075C0002CC30001008028212CE400010083102539
61219+:1075D0001040000B000030213C1C0800279C59ACD7
61220+:1075E0003C0A80008D4E00082406000101CA68256F
61221+:1075F000AD4D00088D4C000C01855825AD4B000C9D
61222+:1076000003E0000800C010213C1C0800279C59AC76
61223+:107610003C0580008CA6000C0004202724020001F9
61224+:1076200000C4182403E00008ACA3000C3C020002D4
61225+:107630001082000B3C0560003C070400108700032B
61226+:107640000000000003E00008000000008CA908D042
61227+:10765000240AFFFD012A402403E00008ACA808D05A
61228+:107660008CA408D02406FFFE0086182403E000083E
61229+:10767000ACA308D03C05601A34A600108CC300806F
61230+:1076800027BDFFF88CC50084AFA3000093A40000C1
61231+:107690002402001010820003AFA5000403E00008DC
61232+:1076A00027BD000893A7000114E0001497AC000266
61233+:1076B00097B800023C0F8000330EFFFC01CF682119
61234+:1076C000ADA50000A3A000003C0660008CC708D058
61235+:1076D0002408FFFE3C04601A00E82824ACC508D04A
61236+:1076E0008FA300048FA200003499001027BD00086A
61237+:1076F000AF22008003E00008AF2300843C0B800031
61238+:10770000318AFFFC014B48218D2800000A00080C3B
61239+:10771000AFA8000427BDFFE8AFBF00103C1C080065
61240+:10772000279C59AC3C0580008CA4000C8CA2000462
61241+:107730003C0300020044282410A0000A00A31824DF
61242+:107740003C0604003C0400021460000900A610245A
61243+:107750001440000F3C0404000000000D3C1C080015
61244+:10776000279C59AC8FBF001003E0000827BD00180C
61245+:107770003C0208008C4259A40040F80900000000B7
61246+:107780003C1C0800279C59AC0A0008358FBF00102C
61247+:107790003C0208008C4259A80040F8090000000093
61248+:1077A0000A00083B000000003C0880008D0201B880
61249+:1077B0000440FFFE35090180AD2400003C031000A9
61250+:1077C00024040040AD250004A1240008A1260009DE
61251+:1077D000A527000A03E00008AD0301B83084FFFFCD
61252+:1077E0000080382130A5FFFF000020210A00084555
61253+:1077F000240600803087FFFF8CA400002406003898
61254+:107800000A000845000028218F8300788F860070C9
61255+:107810001066000B008040213C07080024E75B68ED
61256+:10782000000328C000A710218C440000246300013D
61257+:10783000108800053063000F5466FFFA000328C06B
61258+:1078400003E00008000010213C07080024E75B6CFF
61259+:1078500000A7302103E000088CC200003C03900028
61260+:1078600034620001008220253C038000AC640020CB
61261+:107870008C65002004A0FFFE0000000003E000086B
61262+:10788000000000003C0280003443000100832025FA
61263+:1078900003E00008AC44002027BDFFE0AFB10014B6
61264+:1078A0003091FFFFAFB00010AFBF001812200013DF
61265+:1078B00000A080218CA20000240400022406020003
61266+:1078C0001040000F004028210E0007250000000096
61267+:1078D00000001021AE000000022038218FBF0018E8
61268+:1078E0008FB100148FB0001000402021000028212B
61269+:1078F000000030210A00084527BD00208CA20000AE
61270+:10790000022038218FBF00188FB100148FB00010F3
61271+:107910000040202100002821000030210A000845F5
61272+:1079200027BD002000A010213087FFFF8CA5000498
61273+:107930008C4400000A000845240600068F83FD9C45
61274+:1079400027BDFFE8AFBF0014AFB00010906700087C
61275+:10795000008010210080282130E600400000202116
61276+:1079600010C000088C5000000E0000BD0200202155
61277+:10797000020020218FBF00148FB000100A000548BC
61278+:1079800027BD00180E0008A4000000000E0000BD76
61279+:1079900002002021020020218FBF00148FB00010B0
61280+:1079A0000A00054827BD001827BDFFE0AFB0001052
61281+:1079B0008F90FD9CAFBF001CAFB20018AFB1001498
61282+:1079C00092060001008088210E00087230D2000467
61283+:1079D00092040005001129C2A6050000348300406E
61284+:1079E000A20300050E00087C022020210E00054A9B
61285+:1079F0000220202124020001AE02000C02202821D6
61286+:107A0000A602001024040002A602001224060200AE
61287+:107A1000A60200140E000725A60200161640000F4D
61288+:107A20008FBF001C978C00743C0B08008D6B007896
61289+:107A30002588FFFF3109FFFF256A0001012A382B45
61290+:107A400010E00006A78800743C0F6006240E0016A4
61291+:107A500035ED0010ADAE00508FBF001C8FB2001886
61292+:107A60008FB100148FB0001003E0000827BD002084
61293+:107A700027BDFFE0AFB10014AFBF0018AFB00010DA
61294+:107A80001080000400A088212402008010820007DA
61295+:107A9000000000000000000D8FBF00188FB100141F
61296+:107AA0008FB0001003E0000827BD00200E00087210
61297+:107AB00000A020218F86FD9C0220202190C500057A
61298+:107AC0000E00087C30B000FF2403003E1603FFF1D7
61299+:107AD0003C0680008CC401780480FFFE34C801405D
61300+:107AE000240900073C071000AD11000002202021EE
61301+:107AF000A10900048FBF00188FB100148FB00010CF
61302+:107B0000ACC701780A0008C527BD002027BDFFE0EB
61303+:107B1000AFB00010AFBF0018AFB100143C10800030
61304+:107B20008E110020000000000E00054AAE04002067
61305+:107B3000AE1100208FBF00188FB100148FB000105D
61306+:107B400003E0000827BD00203084FFFF00803821BB
61307+:107B50002406003500A020210A0008450000282145
61308+:107B60003084FFFF008038212406003600A0202149
61309+:107B70000A0008450000282127BDFFD0AFB500242A
61310+:107B80003095FFFFAFB60028AFB40020AFBF002C88
61311+:107B9000AFB3001CAFB20018AFB10014AFB000100B
61312+:107BA00030B6FFFF12A000270000A0218F920058DE
61313+:107BB0008E4300003C0680002402004000033E0289
61314+:107BC00000032C0230E4007F006698241482001D1C
61315+:107BD00030A500FF8F8300682C68000A1100001098
61316+:107BE0008F8D0044000358803C0C0800258C57B84A
61317+:107BF000016C50218D4900000120000800000000A8
61318+:107C000002D4302130C5FFFF0E0008522404008446
61319+:107C1000166000028F920058AF8000688F8D00447C
61320+:107C20002659002026980001032090213314FFFFDD
61321+:107C300015A00004AF9900580295202B1480FFDC9A
61322+:107C400000000000028010218FBF002C8FB600289A
61323+:107C50008FB500248FB400208FB3001C8FB20018A2
61324+:107C60008FB100148FB0001003E0000827BD003072
61325+:107C70002407003414A70149000000009247000EB9
61326+:107C80008F9FFDA08F90FD9C24181600A3E700197C
61327+:107C90009242000D3C0880003C07800CA3E20018D3
61328+:107CA000964A00123C0D60003C117FFFA60A005C62
61329+:107CB000964400103623FFFF240200053099FFFF91
61330+:107CC000AE1900548E46001CAD1800288CEF000041
61331+:107CD0008DAE444801E6482601C93021AE06003881
61332+:107CE0008E05003824CB00013C0E7F00AE05003C21
61333+:107CF0008E0C003CAFEC0004AE0B00208E13002075
61334+:107D0000AE13001CA3E0001BAE03002CA3E2001284
61335+:107D10008E4A001424130050AE0A00348E0400343E
61336+:107D2000AFE400148E590018AE1900489258000CA8
61337+:107D3000A218004E920D000835AF0020A20F0008D7
61338+:107D40008E090018012E282434AC4000AE0C001817
61339+:107D5000920B0000317200FF1253027F2403FF8058
61340+:107D60003C04080024845BE80E0008AA0000000020
61341+:107D70003C1108008E315BE80E00087202202021C1
61342+:107D80002405000424080001A2050025022020216A
61343+:107D90000E00087CA20800053C0580008CB001782C
61344+:107DA0000600FFFE8F92005834AE0140240F0002FF
61345+:107DB0003C091000ADD10000A1CF0004ACA90178AE
61346+:107DC0000A000962AF8000682CAD003751A0FF9413
61347+:107DD0008F8D0044000580803C110800263157E05B
61348+:107DE000021178218DEE000001C0000800000000A3
61349+:107DF0002411000414B1008C3C0780003C080800EA
61350+:107E00008D085BE88F86FD9CACE800208E4500085D
61351+:107E10008F99FDA0240D0050ACC500308E4C000899
61352+:107E2000ACCC00508E4B000CACCB00348E43001019
61353+:107E3000ACC300388E4A0010ACCA00548E42001405
61354+:107E4000ACC2003C8E5F0018AF3F00048E50001C97
61355+:107E5000ACD0002090C40000309800FF130D024AFF
61356+:107E6000000000008CC400348CD00030009030231F
61357+:107E700004C000F12404008C126000EE2402000310
61358+:107E80000A000962AF8200682419000514B900666F
61359+:107E90003C0580003C0808008D085BE88F86FD9C4F
61360+:107EA000ACA800208E4C00048F8AFDA0240720007F
61361+:107EB000ACCC001C924B000824120008A14B001906
61362+:107EC0008F82005890430009A14300188F85005805
61363+:107ED00090BF000A33E400FF1092001028890009C7
61364+:107EE000152000BA240E0002240D0020108D000B76
61365+:107EF000340780002898002117000008240740005C
61366+:107F000024100040109000053C0700012419008057
61367+:107F1000109900023C070002240740008CC20018A0
61368+:107F20003C03FF00004350240147F825ACDF001854
61369+:107F300090B2000BA0D200278F8300589464000CED
61370+:107F4000108001FE000000009467000C3C1F8000C0
61371+:107F50002405FFBFA4C7005C9063000E2407000443
61372+:107F6000A0C300088F820058904A000FA0CA0009E1
61373+:107F70008F8900588D3200108FE400740244C823AA
61374+:107F8000ACD900588D300014ACD0002C95380018B6
61375+:107F9000330DFFFFACCD00409531001A322FFFFFAB
61376+:107FA000ACCF00448D2E001CACCE00489128000EB2
61377+:107FB000A0C8000890CC000801855824126001B6C2
61378+:107FC000A0CB00088F9200580A000962AF870068B2
61379+:107FD0002406000614A600143C0E80003C0F080086
61380+:107FE0008DEF5BE88F85FD98ADCF00208E4900189E
61381+:107FF0008F86FD9C8F8BFDA0ACA900008CC800383B
61382+:1080000024040005ACA800048CCC003C1260008164
61383+:10801000AD6C00000A000962AF84006824110007FB
61384+:1080200010B1004B240400063C05080024A55BE8C1
61385+:108030000E000881240400818F9200580013102B39
61386+:108040000A000962AF820068241F002314BFFFF6F4
61387+:108050003C0C80003C0508008CA55BE88F8BFDA0E4
61388+:10806000AD8500208F91FD9C8E4600042564002084
61389+:1080700026450014AE260028240600030E000F81BA
61390+:10808000257000308F87005802002021240600034D
61391+:108090000E000F8124E500083C04080024845BE8FE
61392+:1080A0000E0008AA0000000092230000240A0050DD
61393+:1080B000306200FF544AFFE18F9200580E000F6CAF
61394+:1080C000000000000A000A6A8F920058240800335A
61395+:1080D00014A800323C0380003C1108008E315BE89C
61396+:1080E0008F8FFDA0AC7100208E420008240D002867
61397+:1080F0008F89FD9CADE200308E4A000C24060009F9
61398+:10810000ADEA00348E5F0010ADFF00388E440014DD
61399+:10811000ADE400208E590018ADF900248E58001CE3
61400+:10812000ADF80028A1ED00118E4E00041260003160
61401+:10813000AD2E00288F9200580A000962AF860068B1
61402+:10814000240D002214ADFFB8000000002404000735
61403+:108150003C1008008E105BE83C188000AF10002037
61404+:108160005660FEAEAF8400683C04080024845BE8DF
61405+:108170000E0008AA241300508F84FD9C90920000EA
61406+:10818000325900FF1333014B000000008F9200585A
61407+:10819000000020210A000962AF8400683C05080045
61408+:1081A00024A55BE80E000858240400810A000A6A2E
61409+:1081B0008F92005802D498213265FFFF0E000852BA
61410+:1081C000240400840A0009628F920058108EFF5325
61411+:1081D000240704002887000310E00179241100041B
61412+:1081E000240F0001548FFF4D240740000A000A228B
61413+:1081F000240701003C05080024A55BE80E0008A444
61414+:10820000240400828F920058000030210A00096285
61415+:10821000AF8600683C04080024845BE88CC2003808
61416+:108220000E0008AA8CC3003C8F9200580A000AC0B6
61417+:1082300000002021240400823C05080024A55BE8FE
61418+:108240000E0008A4000000008F92005800001021CA
61419+:108250000A000962AF8200688E5000048F91FD9C75
61420+:108260003C078000ACF00020922C00050200282181
61421+:10827000318B0002156001562404008A8F92FDA004
61422+:108280002404008D9245001B30A6002014C001502C
61423+:1082900002002821922E00092408001231C900FF93
61424+:1082A0001128014B240400810E00087202002021D5
61425+:1082B0009258001B240F000402002021370D0042B9
61426+:1082C000A24D001B0E00087CA22F00253C0580005B
61427+:1082D0008CA401780480FFFE34B90140241F000201
61428+:1082E000AF300000A33F00048F9200583C101000F4
61429+:1082F000ACB001780A000A6B0013102B8E500004FA
61430+:108300008F91FD9C3C038000AC700020922A0005F8
61431+:108310000200282131420002144000172404008A80
61432+:10832000922C00092412000402002821318B00FF46
61433+:1083300011720011240400810E0008720200202135
61434+:108340008F89FDA0240800122405FFFE912F001B39
61435+:108350000200202135EE0020A12E001BA2280009DA
61436+:108360009226000500C538240E00087CA2270005CF
61437+:1083700002002821000020210E0009330000000027
61438+:108380000A000A6A8F9200588E4C00043C07800055
61439+:108390003C10080026105BE8ACEC00203C01080013
61440+:1083A000AC2C5BE8924B0003317100041220013BBE
61441+:1083B0008F84FD9C24020006A0820009924F001BBE
61442+:1083C000240EFFC031E9003F012E4025A08800089F
61443+:1083D0009245000330A6000114C0013200000000E5
61444+:1083E0008E420008AE0200083C0208008C425BF09E
61445+:1083F000104001318F90FDA0000219C28F8DFD9CAD
61446+:10840000A603000C8E4A000C24180001240400145A
61447+:10841000AE0A002C8E420010AE02001C965F0016C1
61448+:10842000A61F003C96590014A619003EADB8000CDA
61449+:10843000A5B80010A5B80012A5B80014A5B800167C
61450+:1084400012600144A2040011925100033232000272
61451+:108450002E5300018F920058266200080A0009621C
61452+:10846000AF8200688E4400043C1980003C068008FE
61453+:10847000AF2400208E45000890D80000240D005045
61454+:10848000331100FF122D009C2407008824060009E8
61455+:108490000E000845000000000A000A6A8F9200588A
61456+:1084A0008E5000043C0980003C118008AD30002053
61457+:1084B0009228000024050050310400FF10850110AF
61458+:1084C0002407008802002021000028210E00084512
61459+:1084D0002406000E922D00002418FF80020028219F
61460+:1084E00001B8802524040004240600300E0007256E
61461+:1084F000A23000000A000A6A8F9200588E500004D1
61462+:108500008F91FDA03C028000AC500020923F001BE8
61463+:1085100033F900101320006C240700810200202191
61464+:10852000000028212406001F0E000845000000005E
61465+:108530000A000A6A8F9200588E44001C0E00085DE3
61466+:1085400000000000104000E3004048218F880058E0
61467+:1085500024070089012020218D05001C240600012C
61468+:108560000E000845000000000A000A6A8F920058B9
61469+:10857000964900023C10080026105BE831280004F0
61470+:10858000110000973C0460008E4E001C3C0F8000E0
61471+:10859000ADEE00203C010800AC2E5BE896470002DF
61472+:1085A00030E40001148000E6000000008E42000468
61473+:1085B000AE0200083C1008008E105BF0120000ECC8
61474+:1085C0003C0F80008F92FD9C241000018E4E0018FD
61475+:1085D0008F8DFDA08F9FFD9801CF4825AE490018D3
61476+:1085E000A2400005AE50000C3C0808008D085BF06E
61477+:1085F0008F840058A6500010000839C2A6500012FF
61478+:10860000A6500014A6500016A5A7000C8C8C0008DC
61479+:108610008F8B00588F8A0058ADAC002C8D63000CF6
61480+:1086200024070002ADA3001C91460010A1A6001172
61481+:108630008F82005890450011A3E500088F990058DB
61482+:1086400093380012A258004E8F910058922F0013B9
61483+:10865000A1AF00128F920058964E0014A5AE003CB8
61484+:1086600096490016A5A9003E8E480018ADA8001432
61485+:108670005660FD6AAF8700683C05080024A55BE8EA
61486+:108680000E000881000020218F9200580000382140
61487+:108690000A000962AF8700683C05080024A55BE872
61488+:1086A0000E0008A4240400828F9200580A000A4D8C
61489+:1086B000000038210E000F6C000000008F9200585F
61490+:1086C0000A000AC0000020210E00087202002021CA
61491+:1086D0009223001B02002021346A00100E00087C47
61492+:1086E000A22A001B000038210200202100002821BE
61493+:1086F0000A000BA52406001F9242000C305F000107
61494+:1087000013E0000300000000964A000EA4CA002CEB
61495+:10871000924B000C316300025060000600003821CB
61496+:108720008E470014964C0012ACC7001CA4CC001A53
61497+:10873000000038210A000B7F240600093C050800D0
61498+:1087400024A55BE80E0008A42404008B8F92005837
61499+:108750000A000A4D0013382B3C0C08008D8C5BE896
61500+:1087600024DFFFFE25930100326B007F016790211B
61501+:1087700002638824AD110028AE4600E0AE4000E45C
61502+:108780000A0009B3AE5F001CACC000543C0D0800E9
61503+:108790008DAD5BE83C18800C37090100ACED00287A
61504+:1087A0008E510014AD3100E08E4F0014AD2F00E467
61505+:1087B0008E4E001025C7FFFE0A0009F4AD27001CED
61506+:1087C0005491FDD6240740000A000A222407100015
61507+:1087D0000E00092D000000000A000A6A8F9200585E
61508+:1087E0008C83442C3C12DEAD3651BEEF3C010800B8
61509+:1087F000AC205BE810710062000000003C196C6264
61510+:1088000037387970147800082404000297850074C2
61511+:108810009782006C2404009200A2F82B13E0001948
61512+:1088200002002821240400020E00069524050200FF
61513+:108830003C068000ACC200203C010800AC225BE892
61514+:108840001040000D8F8C0058240A002824040003D7
61515+:10885000918B0010316300FF546A00012404000171
61516+:108860000E0000810000000010400004240400837A
61517+:108870000A000BC28F920058240400833C050800B4
61518+:1088800024A55BE80E000881000000008F920058CC
61519+:108890000013382B0A000962AF8700680A000B49F1
61520+:1088A000240200128E4400080E00085D0000000043
61521+:1088B0000A000B55AE0200083C05080024A55BE841
61522+:1088C0000E000858240400878F9200580A000B728B
61523+:1088D0000013102B240400040E000695240500301C
61524+:1088E0001440002A004048218F8800582407008344
61525+:1088F000012020218D05001C0A000BB32406000175
61526+:108900008F8300788F8600701066FEEE000038219D
61527+:108910003C07080024E75B6C000320C00087282187
61528+:108920008CAE000011D0005D246F000131E3000F18
61529+:108930005466FFFA000320C00A000B8C00003821A7
61530+:108940008E4400040E00085D000000000A000BC801
61531+:10895000AE0200083C05080024A55BE80E0008A450
61532+:10896000240400828F9200580A000B72000010212C
61533+:108970003C05080024A55BE80A000C7C2404008761
61534+:108980008C83442C0A000C5B3C196C628F88005865
61535+:108990003C0780083C0C8000240B0050240A000196
61536+:1089A000AD820020A0EB0000A0EA000191030004CA
61537+:1089B000A0E3001891040005A0E400199106000648
61538+:1089C0003C04080024845B6CA0E6001A91020007B6
61539+:1089D0003C06080024C65B68A0E2001B9105000865
61540+:1089E000A0E5001C911F0009A0FF001D9119000ABD
61541+:1089F000A0F9001E9118000BA0F8001F9112000CA6
61542+:108A0000A0F200209111000DA0F100219110000EA4
61543+:108A1000A0F00022910F000FA0EF0023910E001094
61544+:108A2000A0EE0024910D0011A0ED0025950C00147E
61545+:108A3000A4EC0028950B00168F8A00708F920078A6
61546+:108A4000A4EB002A95030018000A10C02545000178
61547+:108A5000A4E3002C8D1F001C0044C0210046C82147
61548+:108A600030A5000FAF3F0000AF09000010B20006B4
61549+:108A7000AF850070000038218D05001C01202021E9
61550+:108A80000A000BB32406000124AD000131A7000F3A
61551+:108A9000AF8700780A000CF9000038213C06080076
61552+:108AA00024C65B680086902100003821ACA000003D
61553+:108AB0000A000B8CAE4000003C0482013C036000C5
61554+:108AC00034820E02AC603D68AF80009803E000087D
61555+:108AD000AC623D6C27BDFFE8AFB000103090FFFFE7
61556+:108AE000001018422C620041AFBF00141440000275
61557+:108AF00024040080240300403C010800AC300060E6
61558+:108B00003C010800AC2300640E000F7500602821B2
61559+:108B1000244802BF2409FF8001092824001039805D
61560+:108B2000001030408FBF00148FB0001000A720212C
61561+:108B300000861821AF8300803C010800AC25005856
61562+:108B40003C010800AC24005C03E0000827BD0018CD
61563+:108B5000308300FF30C6FFFF30E400FF3C08800098
61564+:108B60008D0201B80440FFFE000354000144382583
61565+:108B70003C09600000E920253C031000AD050180A0
61566+:108B8000AD060184AD04018803E00008AD0301B81F
61567+:108B90008F8500583C0A6012354800108CAC0004E8
61568+:108BA0003C0D600E35A60010318B00062D690001CA
61569+:108BB000AD0900C48CA70004ACC731808CA20008AA
61570+:108BC00094A40002ACC231848CA3001C0460000396
61571+:108BD000A784009003E00008000000008CAF00189C
61572+:108BE000ACCF31D08CAE001C03E00008ACCE31D449
61573+:108BF0008F8500588F87FF288F86FF308CAE00044A
61574+:108C00003C0F601235E80010ACEE00788CAD000827
61575+:108C1000ACED007C8CAC0010ACCC004C8CAB000CF0
61576+:108C2000ACCB004894CA00543C0208008C4200447B
61577+:108C300025490001A4C9005494C400543083FFFFA7
61578+:108C400010620017000000003C0208008C42004047
61579+:108C5000A4C200528CA30018ACE300308CA2001414
61580+:108C6000ACE2002C8CB90018ACF900388CB80014B8
61581+:108C700024050001ACF800348D0600BC50C5001975
61582+:108C80008D0200B48D0200B8A4E2004894E40048CC
61583+:108C9000A4E4004A94E800EA03E000083102FFFF80
61584+:108CA0003C0208008C420024A4C00054A4C200521C
61585+:108CB0008CA30018ACE300308CA20014ACE2002CB2
61586+:108CC0008CB90018ACF900388CB8001424050001E8
61587+:108CD000ACF800348D0600BC54C5FFEB8D0200B823
61588+:108CE0008D0200B4A4E2004894E40048A4E4004AE1
61589+:108CF00094E800EA03E000083102FFFF8F86005885
61590+:108D00003C0480008CC900088CC80008000929C0F8
61591+:108D1000000839C0AC87002090C30007306200040F
61592+:108D20001040003EAF85009490CB0007316A0008E8
61593+:108D30001140003D8F87FF2C8CCD000C8CCE001491
61594+:108D400001AE602B11800036000000008CC2000CC8
61595+:108D5000ACE200708CCB00188F85FF288F88FF3025
61596+:108D6000ACEB00748CCA00102402FFF8ACAA00D847
61597+:108D70008CC9000CAD0900608CC4001CACA400D0F0
61598+:108D800090E3007C0062C824A0F9007C90D8000722
61599+:108D9000330F000811E000040000000090ED007C9B
61600+:108DA00035AC0001A0EC007C90CF000731EE000153
61601+:108DB00011C000060000000090E3007C241800347D
61602+:108DC00034790002A0F9007CACB800DC90C2000746
61603+:108DD0003046000210C000040000000090E8007C53
61604+:108DE00035040004A0E4007C90ED007D3C0B600E97
61605+:108DF000356A001031AC003FA0EC007D8D4931D4C4
61606+:108E00003127000110E00002240E0001A0AE00098D
61607+:108E100094AF00EA03E0000831E2FFFF8F87FF2CE8
61608+:108E20000A000DAF8CC200140A000DB0ACE0007057
61609+:108E30008F8C005827BDFFD8AFB3001CAFB200180D
61610+:108E4000AFB00010AFBF0020AFB10014918F00157C
61611+:108E50003C13600E3673001031EB000FA38B009CA7
61612+:108E60008D8F00048D8B0008959F0012959900103E
61613+:108E70009584001A9598001E958E001C33EDFFFF17
61614+:108E8000332AFFFF3089FFFF3308FFFF31C7FFFFA1
61615+:108E90003C010800AC2D00243C010800AC29004432
61616+:108EA0003C010800AC2A0040AE683178AE67317CE6
61617+:108EB00091850015959100163C12601236520010F3
61618+:108EC00030A200FF3230FFFFAE623188AE5000B4F6
61619+:108ED00091830014959F0018240600010066C804C1
61620+:108EE00033F8FFFFAE5900B8AE5800BC918E0014A5
61621+:108EF000AF8F00843C08600631CD00FFAE4D00C04E
61622+:108F0000918A00159584000E3C07600A314900FFE4
61623+:108F1000AF8B00883084FFFFAE4900C835110010C8
61624+:108F20000E000D1034F004103C0208008C4200606A
61625+:108F30003C0308008C6300643C0608008CC60058A3
61626+:108F40003C0508008CA5005C8F8400808FBF00204A
61627+:108F5000AE23004CAE65319CAE030054AE4500DC40
61628+:108F6000AE6231A0AE6331A4AE663198AE22004845
61629+:108F70008FB3001CAE0200508FB10014AE4200E06F
61630+:108F8000AE4300E4AE4600D88FB000108FB2001898
61631+:108F90000A00057D27BD0028978500929783007CF5
61632+:108FA00027BDFFE8AFB0001000A3102BAFBF001427
61633+:108FB000240400058F900058104000552409000239
61634+:108FC0000E0006958F850080AF8200942404000374
61635+:108FD0001040004F240900023C0680000E00008172
61636+:108FE000ACC2002024070001240820001040004DDE
61637+:108FF00024040005978E00928F8AFF2C24090050CC
61638+:1090000025C50001A7850092A14900003C0D08007C
61639+:109010008DAD0064240380008F84FF28000D66005E
61640+:10902000AD4C0018A5400006954B000A8F85FF3017
61641+:109030002402FF8001633024A546000A915F000AE4
61642+:109040000000482103E2C825A159000AA0A0000899
61643+:10905000A140004CA08000D5961800029783009094
61644+:109060003C020004A49800EA960F00022418FFBFF7
61645+:1090700025EE2401A48E00BE8E0D0004ACAD00448C
61646+:109080008E0C0008ACAC0040A4A00050A4A000547A
61647+:109090008E0B000C240C0030AC8B00288E060010C8
61648+:1090A000AC860024A480003EA487004EA487005014
61649+:1090B000A483003CAD420074AC8800D8ACA800602A
61650+:1090C000A08700FC909F00D433F9007FA09900D4C2
61651+:1090D000909000D402187824A08F00D4914E007C88
61652+:1090E00035CD0001A14D007C938B009CAD480070F4
61653+:1090F000AC8C00DCA08B00D68F8800888F87008422
61654+:10910000AC8800C4AC8700C8A5400078A540007AB0
61655+:109110008FBF00148FB000100120102103E0000861
61656+:1091200027BD00188F8500940E0007258F860080CC
61657+:109130000A000E9F2409000227BDFFE0AFB0001017
61658+:109140008F900058AFB10014AFBF00188E09000413
61659+:109150000E00054A000921C08E0800048F84FF28F4
61660+:109160008F82FF30000839C03C068000ACC7002069
61661+:10917000948500EA904300131460001C30B1FFFF97
61662+:109180008F8CFF2C918B0008316A00401540000B3A
61663+:10919000000000008E0D0004022030218FBF001857
61664+:1091A0008FB100148FB00010240400220000382179
61665+:1091B000000D29C00A000D2F27BD00200E000098C9
61666+:1091C000000000008E0D0004022030218FBF001827
61667+:1091D0008FB100148FB00010240400220000382149
61668+:1091E000000D29C00A000D2F27BD00200E000090A1
61669+:1091F000000000008E0D0004022030218FBF0018F7
61670+:109200008FB100148FB00010240400220000382118
61671+:10921000000D29C00A000D2F27BD002027BDFFE04B
61672+:10922000AFB200183092FFFFAFB00010AFBF001C0C
61673+:10923000AFB100141240001E000080218F8600583C
61674+:109240008CC500002403000600053F02000514023F
61675+:1092500030E4000714830016304500FF2CA80006F8
61676+:1092600011000040000558803C0C0800258C58BCBB
61677+:10927000016C50218D490000012000080000000011
61678+:109280008F8E0098240D000111CD005024020002A1
61679+:10929000AF820098260900013130FFFF24C800206A
61680+:1092A0000212202B010030211480FFE5AF88005806
61681+:1092B000020010218FBF001C8FB200188FB1001464
61682+:1092C0008FB0001003E0000827BD00209387007EC8
61683+:1092D00054E00034000030210E000DE700000000D3
61684+:1092E0008F8600580A000EFF240200018F87009825
61685+:1092F0002405000210E50031240400130000282199
61686+:1093000000003021240700010E000D2F0000000096
61687+:109310000A000F008F8600588F83009824020002F5
61688+:109320001462FFF6240400120E000D9A00000000E3
61689+:109330008F85009400403021240400120E000D2F70
61690+:10934000000038210A000F008F8600588F83009894
61691+:109350002411000310710029241F0002107FFFCE8A
61692+:1093600026090001240400100000282100003021FB
61693+:109370000A000F1D240700018F91009824060002A7
61694+:109380001626FFF9240400100E000E410000000014
61695+:10939000144000238F9800588F8600580A000EFF53
61696+:1093A00024020003240400140E000D2F00002821C5
61697+:1093B0008F8600580A000EFF240200020E000EA93C
61698+:1093C000000000000A000F008F8600580E000D3FBD
61699+:1093D00000000000241900022404001400002821C9
61700+:1093E0000000302100003821AF9900980E000D2FA9
61701+:1093F000000000000A000F008F8600580E000D5775
61702+:10940000000000008F8500942419000200403021E4
61703+:1094100024040010000038210A000F56AF9900986C
61704+:109420000040382124040010970F0002000028217A
61705+:109430000E000D2F31E6FFFF8F8600580A000F0047
61706+:10944000AF9100988F84FF2C3C077FFF34E6FFFF2D
61707+:109450008C8500182402000100A61824AC83001893
61708+:1094600003E00008A08200053084FFFF30A5FFFF65
61709+:109470001080000700001821308200011040000217
61710+:1094800000042042006518211480FFFB00052840DD
61711+:1094900003E000080060102110C000070000000079
61712+:1094A0008CA2000024C6FFFF24A50004AC820000AB
61713+:1094B00014C0FFFB2484000403E000080000000047
61714+:1094C00010A0000824A3FFFFAC86000000000000ED
61715+:1094D000000000002402FFFF2463FFFF1462FFFA74
61716+:1094E0002484000403E0000800000000000411C010
61717+:1094F00003E000082442024027BDFFE8AFB000109F
61718+:1095000000808021AFBF00140E000F9600A0202124
61719+:1095100000504821240AFF808FBF00148FB0001034
61720+:10952000012A30243127007F3C08800A3C042100B6
61721+:1095300000E8102100C428253C03800027BD001846
61722+:10954000AC650024AF820038AC400000AC6500245C
61723+:1095500003E00008AC4000403C0D08008DAD005811
61724+:1095600000056180240AFF8001A45821016C482174
61725+:10957000012A30243127007F3C08800C3C04210064
61726+:1095800000E8102100C428253C038000AC650028B9
61727+:10959000AF82003403E00008AC40002430A5FFFF98
61728+:1095A0003C0680008CC201B80440FFFE3C086015F8
61729+:1095B00000A838253C031000ACC40180ACC0018475
61730+:1095C000ACC7018803E00008ACC301B83C0D08003B
61731+:1095D0008DAD005800056180240AFF8001A4582148
61732+:1095E000016C4021010A4824000931403107007F05
61733+:1095F00000C728253C04200000A418253C02800058
61734+:10960000AC43083003E00008AF80003427BDFFE81A
61735+:10961000AFB0001000808021AFBF00140E000F9685
61736+:1096200000A0202100504821240BFF80012B502452
61737+:10963000000A39403128007F3C0620008FBF00140B
61738+:109640008FB0001000E8282534C2000100A21825C0
61739+:109650003C04800027BD0018AC83083003E00008FC
61740+:10966000AF8000383C0580088CA700603C0680086D
61741+:109670000087102B144000112C8340008CA8006040
61742+:109680002D0340001060000F240340008CC90060CF
61743+:109690000089282B14A00002008018218CC30060D0
61744+:1096A00000035A42000B30803C0A0800254A59202A
61745+:1096B00000CA202103E000088C8200001460FFF340
61746+:1096C0002403400000035A42000B30803C0A08008B
61747+:1096D000254A592000CA202103E000088C8200009E
61748+:1096E0003C05800890A60008938400AB24C20001CA
61749+:1096F000304200FF3043007F1064000C0002382726
61750+:10970000A0A200083C0480008C85017804A0FFFE24
61751+:109710008F8A00A0240900023C081000AC8A014096
61752+:10972000A089014403E00008AC8801780A00101BFE
61753+:1097300030E2008027BDFFD8AFB200188F9200A49E
61754+:10974000AFBF0020AFB3001CAFB00010AFB100142A
61755+:109750008F9300348E5900283C1000803C0EFFEFA0
61756+:10976000AE7900008E580024A260000A35CDFFFFBC
61757+:10977000AE7800049251002C3C0BFF9F356AFFFF2E
61758+:10978000A271000C8E6F000C3C080040A271000B0F
61759+:1097900001F06025018D4824012A382400E8302595
61760+:1097A000AE66000C8E450004AE6000183C0400FF5D
61761+:1097B000AE6500148E43002C3482FFFFA6600008C3
61762+:1097C0000062F824AE7F00108E5900088F9000A030
61763+:1097D000964E0012AE7900208E51000C31D83FFF1A
61764+:1097E00000187980AE7100248E4D001401F06021C4
61765+:1097F00031CB0001AE6D00288E4A0018000C41C22A
61766+:10980000000B4B80AE6A002C8E46001C01093821EB
61767+:10981000A667001CAE660030964500028E4400200C
61768+:10982000A665001EAE64003492430033306200042B
61769+:1098300054400006924700003C0280083443010077
61770+:109840008C7F00D0AE7F0030924700008F860038BA
61771+:10985000A0C700309245003330A4000250800007BA
61772+:10986000925100018F880038240BFF80910A00304C
61773+:10987000014B4825A1090030925100018F9000381A
61774+:10988000240CFFBF2404FFDFA21100318F8D0038AC
61775+:109890003C1880083711008091AF003C31EE007F0A
61776+:1098A000A1AE003C8F890038912B003C016C502404
61777+:1098B000A12A003C8F9F00388E68001493E6003C7C
61778+:1098C0002D0700010007114000C4282400A218251C
61779+:1098D000A3E3003C8F87003896590012A4F90032A8
61780+:1098E0008E450004922E007C30B0000300107823D7
61781+:1098F00031ED000300AD102131CC000215800002D3
61782+:1099000024460034244600303C0280083443008062
61783+:10991000907F007C00BFC824333800041700000289
61784+:1099200024C2000400C010218F98003824190002BE
61785+:10993000ACE20034A3190000924F003F8F8E003834
61786+:109940003C0C8008358B0080A1CF00018F9100383E
61787+:10995000924D003F8E440004A62D0002956A005CE3
61788+:109960000E000FF43150FFFF00024B800209382532
61789+:109970003C08420000E82825AE2500048E4400384B
61790+:109980008F850038ACA400188E460034ACA6001CAD
61791+:10999000ACA0000CACA00010A4A00014A4A0001661
61792+:1099A000A4A00020A4A00022ACA000248E62001479
61793+:1099B00050400001240200018FBF00208FB3001C23
61794+:1099C0008FB200188FB100148FB00010ACA2000845
61795+:1099D0000A00101327BD002827BDFFC83C058008DA
61796+:1099E00034A40080AFBF0034AFBE0030AFB7002C4E
61797+:1099F000AFB60028AFB50024AFB40020AFB3001C51
61798+:109A0000AFB20018AFB10014AFB00010948300786B
61799+:109A10009482007A104300512405FFFF0080F0215A
61800+:109A20000A0011230080B821108B004D8FBF003435
61801+:109A30008F8600A03C1808008F18005C2411FF805E
61802+:109A40003C1680000306782101F18024AED0002C62
61803+:109A500096EE007A31EC007F3C0D800E31CB7FFF1B
61804+:109A6000018D5021000B4840012AA82196A4000036
61805+:109A70003C0808008D0800582405FF8030953FFF02
61806+:109A800001061821001539800067C8210325F82434
61807+:109A90003C02010003E290253338007F3C11800C2A
61808+:109AA000AED20028031190219250000D320F000415
61809+:109AB00011E0003702E0982196E3007A96E8007AF8
61810+:109AC00096E5007A2404800031077FFF24E300013B
61811+:109AD00030627FFF00A4F82403E2C825A6F9007ACB
61812+:109AE00096E6007A3C1408008E94006030D67FFF22
61813+:109AF00012D400C1000000008E5800188F8400A00E
61814+:109B000002A028212713FFFF0E000FCEAE53002C1A
61815+:109B100097D5007897D4007A12950010000028217C
61816+:109B20003C098008352401003C0A8008914800085F
61817+:109B3000908700D53114007F30E400FF0284302B81
61818+:109B400014C0FFB9268B0001938E00AB268C000158
61819+:109B5000008E682115ACFFB78F8600A08FBF003440
61820+:109B60008FBE00308FB7002C8FB600288FB5002431
61821+:109B70008FB400208FB3001C8FB200188FB1001477
61822+:109B80008FB0001000A0102103E0000827BD0038AE
61823+:109B900000C020210E000F99028028218E4B00105A
61824+:109BA0008E4C00308F84003824090002016C502351
61825+:109BB000AE4A0010A089000096E3005C8E4400309D
61826+:109BC0008F9100380E000FF43070FFFF00024380C9
61827+:109BD000020838253C02420000E22825AE25000498
61828+:109BE0008E5F00048F8A00388E590000240B000815
61829+:109BF000AD5F001CAD590018AD40000CAD40001029
61830+:109C00009246000A240400052408C00030D000FF5A
61831+:109C1000A550001496580008A55800169251000A45
61832+:109C20003C188008322F00FFA54F0020964E0008F8
61833+:109C300037110100A54E0022AD400024924D000BCB
61834+:109C400031AC00FFA54C0002A14B00018E49003051
61835+:109C50008F830038240BFFBFAC690008A06400307C
61836+:109C60008F9000382403FFDF9607003200E8282495
61837+:109C700000B51025A6020032921F003233F9003FD2
61838+:109C800037260040A20600328F8C0038AD800034A9
61839+:109C90008E2F00D0AD8F0038918E003C3C0F7FFF9F
61840+:109CA00031CD007FA18D003C8F84003835EEFFFF61
61841+:109CB000908A003C014B4824A089003C8F850038E5
61842+:109CC00090A8003C01033824A0A7003C8E42003439
61843+:109CD0008F9100383C038008AE2200408E59002C42
61844+:109CE0008E5F0030033F3023AE26004492300048A0
61845+:109CF0003218007FA23800488F8800388E4D00301F
61846+:109D00008D0C004801AE582401965024014B482583
61847+:109D1000AD0900489244000AA104004C964700088F
61848+:109D20008F850038A4A7004E8E5000308E4400303E
61849+:109D30000E0003818C65006092F9007C0002F940FE
61850+:109D4000004028210002110003E2302133360002D6
61851+:109D500012C00003020680210005B0800216802197
61852+:109D6000926D007C31B30004126000020005708027
61853+:109D7000020E80218E4B00308F8800382405800031
61854+:109D8000316A0003000A4823312400030204182129
61855+:109D9000AD03003496E4007A96F0007A96F1007AEA
61856+:109DA00032027FFF2447000130FF7FFF0225C824D5
61857+:109DB000033F3025A6E6007A96F8007A3C120800A8
61858+:109DC0008E520060330F7FFF11F200180000000078
61859+:109DD0008F8400A00E000FCE02A028218F8400A047
61860+:109DE0000E000FDE028028210E001013000000007C
61861+:109DF0000A00111F0000000096F1007A022480245E
61862+:109E0000A6F0007A92EF007A92EB007A31EE00FF32
61863+:109E1000000E69C2000D6027000C51C03169007F3F
61864+:109E2000012A20250A001119A2E4007A96E6007A98
61865+:109E300000C5C024A6F8007A92EF007A92F3007A67
61866+:109E400031F200FF001271C2000E6827000DB1C090
61867+:109E5000326C007F01962825A2E5007A0A0011D015
61868+:109E60008F8400A03C0380003084FFFF30A5FFFFFB
61869+:109E7000AC640018AC65001C03E000088C620014A0
61870+:109E800027BDFFA03C068008AFBF005CAFBE0058F6
61871+:109E9000AFB70054AFB60050AFB5004CAFB40048F8
61872+:109EA000AFB30044AFB20040AFB1003CAFB0003838
61873+:109EB00034C80100910500D590C700083084FFFF29
61874+:109EC00030A500FF30E2007F0045182AAFA4001043
61875+:109ED000A7A00018A7A0002610600055AFA000148E
61876+:109EE00090CA00083149007F00A9302324D3FFFF26
61877+:109EF0000013802B8FB400100014902B02128824C2
61878+:109F0000522000888FB300143C03800894790052DB
61879+:109F1000947E00508FB60010033EC0230018BC0092
61880+:109F2000001714030016FC0002C2A82A16A00002A3
61881+:109F3000001F2C030040282100133C0000072403CD
61882+:109F400000A4102A5440000100A020212885000907
61883+:109F500014A000020080A021241400083C0C8008FA
61884+:109F60008D860048001459808D88004C3C03800089
61885+:109F70003169FFFF3C0A0010012A202534710400DA
61886+:109F8000AC660038AF9100A4AC68003CAC64003013
61887+:109F900000000000000000000000000000000000C1
61888+:109FA00000000000000000000000000000000000B1
61889+:109FB0008C6E000031CD002011A0FFFD0014782A26
61890+:109FC00001F01024104000390000A8213C16800840
61891+:109FD00092D700083C1280008E44010032F6007FC8
61892+:109FE0000E000F9902C028218E3900108E44010006
61893+:109FF0000000902133373FFF0E000FB102E028210F
61894+:10A00000923800003302003F2C500008520000102C
61895+:10A0100000008821000210803C030800246358E4FB
61896+:10A020000043F8218FFE000003C00008000000007C
61897+:10A0300090CF0008938C00AB31EE007F00AE682318
61898+:10A04000018D58210A0012172573FFFF0000882197
61899+:10A050003C1E80008FC401000E000FCE02E02821BC
61900+:10A060008FC401000E000FDE02C028211220000F55
61901+:10A070000013802B8F8B00A426A400010004AC00E9
61902+:10A08000027298230015AC032578004002B4B02A70
61903+:10A090000013802B241700010300882102D0102414
61904+:10A0A000AF9800A41440FFC9AFB700143C07800864
61905+:10A0B00094E200508FAE00103C05800002A288217F
61906+:10A0C0003C060020A4F10050ACA6003094F40050EF
61907+:10A0D00094EF005201D51823306CFFFF11F4001EDD
61908+:10A0E000AFAC00108CEF004C001561808CF500487F
61909+:10A0F00001EC28210000202100AC582B02A4C02133
61910+:10A10000030BB021ACE5004CACF600488FB4001056
61911+:10A110000014902B021288241620FF7C3C03800838
61912+:10A120008FB300148FBF005C8FBE00583A620001ED
61913+:10A130008FB700548FB600508FB5004C8FB40048D5
61914+:10A140008FB300448FB200408FB1003C8FB0003815
61915+:10A1500003E0000827BD006094FE00548CF2004428
61916+:10A1600033C9FFFE0009C8C00259F821ACBF003C4A
61917+:10A170008CE800448CAD003C010D50231940003B9D
61918+:10A18000000000008CF7004026E20001ACA200387D
61919+:10A190003C05005034A700103C038000AC67003041
61920+:10A1A00000000000000000000000000000000000AF
61921+:10A1B000000000000000000000000000000000009F
61922+:10A1C0008C7800003316002012C0FFFD3C1180087F
61923+:10A1D000962200543C1580003C068008304E000159
61924+:10A1E000000E18C0007578218DEC04003C070800B3
61925+:10A1F0008CE700443C040020ACCC00488DF40404FF
61926+:10A20000240B0001ACD4004C10EB0260AEA4003073
61927+:10A21000963900523C0508008CA5004000B99021F9
61928+:10A22000A6320052963F005427ED0001A62D00549F
61929+:10A230009626005430C4FFFF5487FF2F8FB40010C0
61930+:10A2400030A5FFFF0E0011F4A62000543C070800C3
61931+:10A250008CE70024963E00520047B82303D74823DA
61932+:10A26000A62900520A0012198FB400108CE2004097
61933+:10A270000A0012BE00000000922400012407000121
61934+:10A280003085007F14A7001C97AD00268E2B00148C
61935+:10A29000240CC000316A3FFF01AC48243C06080092
61936+:10A2A0008CC60060012A402531043FFF0086882BC0
61937+:10A2B00012200011A7A800263C0508008CA5005814
61938+:10A2C0008F9100A0000439802402FF8000B1182182
61939+:10A2D0000067F82103E2F02433F8007F3C1280008D
61940+:10A2E0003C19800EAE5E002C0319702191D0000D38
61941+:10A2F000360F0004A1CF000D0E001028241200011B
61942+:10A30000241100013C1E80008FC401000E000FCEFE
61943+:10A3100002E028218FC401000E000FDE02C02821B8
61944+:10A320001620FF558F8B00A40A0012860013802B85
61945+:10A330008F8600A490C80001310400201080019194
61946+:10A34000241000013C048008348B0080916A007C5A
61947+:10A350008F9E0034AFA0002C314900011120000F66
61948+:10A36000AFB000288CCD00148C8E006001AE602B45
61949+:10A370001580000201A038218C8700603C188008FD
61950+:10A38000370300808C70007000F0782B15E000021D
61951+:10A3900000E020218C640070AFA4002C3C028008F7
61952+:10A3A000344500808CD200148CBF0070025FC82B33
61953+:10A3B00017200002024020218CA400708FA7002CDF
61954+:10A3C0000087182310600003AFA3003024050002AB
61955+:10A3D000AFA500288FA400280264882B162000BA9D
61956+:10A3E000000018218CD000388FCE000C3C0F00806C
61957+:10A3F000AFD000008CCD00343C0CFF9F01CF58251E
61958+:10A40000AFCD000490CA003F3586FFFF01662024CF
61959+:10A410003C0900203C08FFEFA3CA000B0089382547
61960+:10A420003511FFFF00F118243C0500088F8700A4B8
61961+:10A430000065C825AFD9000C8CE20014AFC000182D
61962+:10A440008FA60030AFC200148CF800188FB0002C1B
61963+:10A450003C1FFFFBAFD8001C8CEF000837F2FFFF5A
61964+:10A4600003326824AFCF00248CEC000C020670216C
61965+:10A47000AFCD000CA7C00038A7C0003AAFCE002C6B
61966+:10A48000AFCC0020AFC000288CEA00148FAB002CAA
61967+:10A49000014B48230126402311000011AFC80010D2
61968+:10A4A00090EB003D8FC900048FC80000000B5100E5
61969+:10A4B000012A28210000102100AA882B010218215E
61970+:10A4C0000071F821AFC50004AFDF000090F2003D3D
61971+:10A4D000A3D2000A8F9900A497380006A7D80008D5
61972+:10A4E0008F910038240800023C038008A228000055
61973+:10A4F0003465008094BF005C8FA4002C33F0FFFF14
61974+:10A500000E000FF48F9200380002CB808F8500A4DC
61975+:10A51000021978253C18420001F87025AE4E00045F
61976+:10A520008F8400388CAD0038AC8D00188CAC0034B2
61977+:10A53000AC8C001CAC80000CAC800010A48000141B
61978+:10A54000A4800016A4800020A4800022AC800024F7
61979+:10A5500090A6003F8FA7002CA486000250E0019235
61980+:10A56000240700018FA200305040000290A2003D5D
61981+:10A5700090A2003E244A0001A08A00018F84003886
61982+:10A580008FA9002CAC8900083C128008364D008051
61983+:10A5900091AC007C3186000214C000022407003414
61984+:10A5A000240700308F8500A43C198008373F0080C5
61985+:10A5B00090B0000093F9007C240E0004A0900030BD
61986+:10A5C0008F8F00A48FB8002C8F8D003891F200017E
61987+:10A5D0003304000301C46023A1B200318F8E003820
61988+:10A5E0008F8600A42402C00095CA003294C90012CC
61989+:10A5F0008FAB002C0142402431233FFF010388250B
61990+:10A60000A5D1003291D000323185000300EBF82152
61991+:10A610003218003F370F0040A1CF00328FA4002C2A
61992+:10A6200003E5382133280004108000028F850038AC
61993+:10A6300000E838213C0A8008ACA700343549010005
61994+:10A640008D2800D08FA3002C2419FFBFACA80038A0
61995+:10A6500090B1003C2C640001240FFFDF3227007F03
61996+:10A66000A0A7003C8F98003800049140931F003C45
61997+:10A6700003F98024A310003C8F8C0038918E003C9D
61998+:10A6800001CF682401B23025A186003C8F8900A447
61999+:10A690008F8800388D2B0020AD0B00408D220024C8
62000+:10A6A000AD0200448D2A0028AD0A00488D23002CFD
62001+:10A6B0000E001013AD03004C8FB1002824070002D8
62002+:10A6C000122700118FA300280003282B00058023E8
62003+:10A6D0000270982400608021006090210A00126FAF
62004+:10A6E0000010882B962900128F8400A00000902172
62005+:10A6F0003125FFFFA7A900180E000FC22411000189
62006+:10A700000A00131D3C1E80003C0B80003C12800898
62007+:10A710008D640100924900088F92FF340E000F995A
62008+:10A720003125007F8F9900388FA700288FA4003033
62009+:10A73000A3270000965F005C33F0FFFF0E000FF4CC
62010+:10A740008F91003800026B80020D80253C0842008A
62011+:10A750008F8D00A402085025AE2A00048DA5003874
62012+:10A760008F8A003800007821000F1100AD450018D5
62013+:10A770008DB800343C047FFF3488FFFFAD58001CC7
62014+:10A7800091A6003E8D4C001C8D4900180006190052
62015+:10A79000000677020183C821004E58250323882B29
62016+:10A7A000012B382100F1F821AD59001CAD5F0018D4
62017+:10A7B000AD40000CAD40001091B0003E8FA40030C1
62018+:10A7C00024090005A550001495A500042419C00013
62019+:10A7D00000884024A545001691B8003EA5580020E9
62020+:10A7E00095AF0004A54F0022AD40002491AE003F7C
62021+:10A7F000A54E000291A6003E91AC003D01861023BB
62022+:10A80000244B0001A14B00018F9100388FA3003031
62023+:10A810003C028008344B0100AE230008A22900301E
62024+:10A820008F8C00388F8700A4959F003294F000121F
62025+:10A830002407FFBF033FC02432053FFF03057825EF
62026+:10A84000A58F0032918E00322418FFDF31CD003FFA
62027+:10A8500035A60040A18600328F910038240DFFFFFD
62028+:10A86000240CFF80AE2000348D6A00D0AE2A003860
62029+:10A870009223003C3069007FA229003C8F90003871
62030+:10A880003C0380009219003C0327F824A21F003CDF
62031+:10A890008F8E003891C5003C00B87824A1CF003CD1
62032+:10A8A0008F8A00383C0E8008AD4D00408FA6002CEA
62033+:10A8B000AD46004491420048004C5825A14B004849
62034+:10A8C0008F9000388F9900A48E09004801238824B6
62035+:10A8D00002283825AE070048933F003EA21F004CD7
62036+:10A8E0008F9800A48F8F003897050004A5E5004ECF
62037+:10A8F0000E0003818DC500609246007C8FAC003055
62038+:10A9000000026940000291000040282130CB000283
62039+:10A9100001B21021156000AA018230213C0E80088E
62040+:10A9200035C20080904C007C31830004106000032D
62041+:10A930008FB900300005788000CF3021241F00043B
62042+:10A940008F910038332D000303ED8023320800037C
62043+:10A9500000C85021AE2A00343C188000A7C500383A
62044+:10A960003C0680088F04010090DE00080E000FDE18
62045+:10A9700033C5007F0E001013000000000A00140D04
62046+:10A980008FA300288F9800348CC90038241F00033F
62047+:10A99000A7000008AF0900008CC50034A300000A1E
62048+:10A9A0008F9900A4AF0500043C080080932D003F60
62049+:10A9B000A31F000C8F0A000C3C02FF9FA30D000B8D
62050+:10A9C0000148F0253451FFFF3C12FFEF8F9900A49E
62051+:10A9D00003D170243646FFFF01C61824AF03000CD4
62052+:10A9E0008F2C0014972900128F8400A0AF0C001048
62053+:10A9F0008F2F0014AF000018AF000020AF0F00141D
62054+:10AA0000AF0000248F270018312F3FFF000F59801F
62055+:10AA1000AF0700288F2500080164F821312D0001BF
62056+:10AA2000AF0500308F31000C8F920038001F51C2EB
62057+:10AA3000000D438001481021241E00023C068008BE
62058+:10AA4000A702001CA7000034AF11002CA25E00007A
62059+:10AA500034D20080964E005C8F9900383C0342004F
62060+:10AA600031CCFFFF01833825AF2700048F8B00A472
62061+:10AA7000240500012402C0008D640038240700343E
62062+:10AA8000AF2400188D690034AF29001CAF20000CE2
62063+:10AA9000AF200010A7200014A7200016A720002038
62064+:10AAA000A7200022AF200024A7300002A325000128
62065+:10AAB0008F8800388F9F00A4AD10000893ED000030
62066+:10AAC000A10D00308F8A00A48F98003891510001A9
62067+:10AAD000A31100318F8B0038957E003203C27024A1
62068+:10AAE00001CF6025A56C0032916300323064003FD5
62069+:10AAF000A16400329249007C3125000214A00002BA
62070+:10AB00008F840038240700303C198008AC8700345B
62071+:10AB1000373201008E5F00D0240AFFBF020090216F
62072+:10AB2000AC9F0038908D003C31A8007FA088003C8D
62073+:10AB30008F9E003893C2003C004A8824A3D1003C79
62074+:10AB40008F8300380010882B9066003C34CE0020A4
62075+:10AB5000A06E003C8F8400A48F9800388C8C00205D
62076+:10AB6000AF0C00408C8F0024AF0F00448C8700286E
62077+:10AB7000AF0700488C8B002CAF0B004C0E0010135D
62078+:10AB80003C1E80000A0012700000000094C80052B1
62079+:10AB90003C0A08008D4A002401488821A4D10052B3
62080+:10ABA0000A0012198FB40010A08700018F840038AA
62081+:10ABB000240B0001AC8B00080A0013BE3C12800875
62082+:10ABC000000520800A0014A200C4302127BDFFE048
62083+:10ABD0003C0D8008AFB20018AFB00010AFBF001C32
62084+:10ABE000AFB1001435B200808E4C001835A80100BA
62085+:10ABF000964B000695A70050910900FC000C5602E8
62086+:10AC0000016728233143007F312600FF240200031F
62087+:10AC1000AF8300A8AF8400A010C2001B30B0FFFFBC
62088+:10AC2000910600FC2412000530C200FF10520033D0
62089+:10AC300000000000160000098FBF001C8FB2001832
62090+:10AC40008FB100148FB00010240D0C003C0C80005C
62091+:10AC500027BD002003E00008AD8D00240E0011FB8D
62092+:10AC6000020020218FBF001C8FB200188FB100148A
62093+:10AC70008FB00010240D0C003C0C800027BD00207C
62094+:10AC800003E00008AD8D0024965800789651007AB4
62095+:10AC9000924E007D0238782631E8FFFF31C400C0B3
62096+:10ACA000148000092D11000116000037000000007B
62097+:10ACB0005620FFE28FBF001C0E0010D100000000E4
62098+:10ACC0000A00156A8FBF001C1620FFDA0000000082
62099+:10ACD0000E0010D1000000001440FFD88FBF001CF0
62100+:10ACE0001600002200000000925F007D33E2003F6A
62101+:10ACF000A242007D0A00156A8FBF001C950900EA78
62102+:10AD00008F86008000802821240400050E0007257E
62103+:10AD10003130FFFF978300923C0480002465FFFFE1
62104+:10AD2000A78500928C8A01B80540FFFE0000000054
62105+:10AD3000AC8001808FBF001CAC9001848FB20018E2
62106+:10AD40008FB100148FB000103C0760133C0B100053
62107+:10AD5000240D0C003C0C800027BD0020AC8701882E
62108+:10AD6000AC8B01B803E00008AD8D00240E0011FB90
62109+:10AD7000020020215040FFB18FBF001C925F007D78
62110+:10AD80000A00159733E2003F0E0011FB020020215C
62111+:10AD90001440FFAA8FBF001C122000070000000013
62112+:10ADA0009259007D3330003F36020040A242007DC0
62113+:10ADB0000A00156A8FBF001C0E0010D100000000B1
62114+:10ADC0005040FF9E8FBF001C9259007D3330003FE2
62115+:10ADD0000A0015C636020040000000000000001BFB
62116+:10ADE0000000000F0000000A00000008000000063C
62117+:10ADF0000000000500000005000000040000000441
62118+:10AE00000000000300000003000000030000000336
62119+:10AE10000000000300000002000000020000000229
62120+:10AE2000000000020000000200000002000000021A
62121+:10AE3000000000020000000200000002000000020A
62122+:10AE400000000002000000020000000200000002FA
62123+:10AE50000000000100000001000000018008010066
62124+:10AE6000800800808008000000000C000000308096
62125+:10AE7000080011D00800127C08001294080012A8E3
62126+:10AE8000080012BC080011D0080011D0080012F010
62127+:10AE90000800132C080013400800138808001A8CBF
62128+:10AEA00008001A8C08001AC408001AC408001AD82E
62129+:10AEB00008001AA808001D0008001CCC08001D5836
62130+:10AEC00008001D5808001DE008001D108008024001
62131+:10AED000080027340800256C0800275C080027F4C8
62132+:10AEE0000800293C0800298808002AAC080029B479
62133+:10AEF00008002A38080025DC08002EDC08002EA4F3
62134+:10AF000008002588080025880800258808002B20CF
62135+:10AF100008002B20080025880800258808002DD06F
62136+:10AF2000080025880800258808002588080025884D
62137+:10AF300008002E0C080025880800258808002588B0
62138+:10AF4000080025880800258808002588080025882D
62139+:10AF5000080025880800258808002588080025881D
62140+:10AF6000080025880800258808002588080029A8E9
62141+:10AF7000080025880800258808002E680800258814
62142+:10AF800008002588080025880800258808002588ED
62143+:10AF900008002588080025880800258808002588DD
62144+:10AFA00008002588080025880800258808002588CD
62145+:10AFB00008002588080025880800258808002588BD
62146+:10AFC00008002CF4080025880800258808002C6853
62147+:10AFD00008002BC408003CE408003CB808003C848E
62148+:10AFE00008003C5808003C3808003BEC8008010091
62149+:10AFF00080080080800800008008008008004C6401
62150+:10B0000008004C9C08004BE408004C6408004C64A9
62151+:10B01000080049B808004C64080050500A000C842D
62152+:10B0200000000000000000000000000D7278703683
62153+:10B030002E322E31620000000602010300000000E3
62154+:10B0400000000001000000000000000000000000FF
62155+:10B0500000000000000000000000000000000000F0
62156+:10B0600000000000000000000000000000000000E0
62157+:10B0700000000000000000000000000000000000D0
62158+:10B0800000000000000000000000000000000000C0
62159+:10B0900000000000000000000000000000000000B0
62160+:10B0A00000000000000000000000000000000000A0
62161+:10B0B0000000000000000000000000000000000090
62162+:10B0C0000000000000000000000000000000000080
62163+:10B0D0000000000000000000000000000000000070
62164+:10B0E0000000000000000000000000000000000060
62165+:10B0F0000000000000000000000000000000000050
62166+:10B10000000000000000000000000000000000003F
62167+:10B11000000000000000000000000000000000002F
62168+:10B12000000000000000000000000000000000001F
62169+:10B13000000000000000000000000000000000000F
62170+:10B1400000000000000000000000000000000000FF
62171+:10B1500000000000000000000000000000000000EF
62172+:10B1600000000000000000000000000000000000DF
62173+:10B1700000000000000000000000000000000000CF
62174+:10B1800000000000000000000000000000000000BF
62175+:10B1900000000000000000000000000000000000AF
62176+:10B1A000000000000000000000000000000000009F
62177+:10B1B000000000000000000000000000000000008F
62178+:10B1C000000000000000000000000000000000007F
62179+:10B1D000000000000000000000000000000000006F
62180+:10B1E000000000000000000000000000000000005F
62181+:10B1F000000000000000000000000000000000004F
62182+:10B20000000000000000000000000000000000003E
62183+:10B21000000000000000000000000000000000002E
62184+:10B22000000000000000000000000000000000001E
62185+:10B23000000000000000000000000000000000000E
62186+:10B2400000000000000000000000000000000000FE
62187+:10B2500000000000000000000000000000000000EE
62188+:10B2600000000000000000000000000000000000DE
62189+:10B2700000000000000000000000000000000000CE
62190+:10B2800000000000000000000000000000000000BE
62191+:10B2900000000000000000000000000000000000AE
62192+:10B2A000000000000000000000000000000000009E
62193+:10B2B000000000000000000000000000000000008E
62194+:10B2C000000000000000000000000000000000007E
62195+:10B2D000000000000000000000000000000000006E
62196+:10B2E000000000000000000000000000000000005E
62197+:10B2F000000000000000000000000000000000004E
62198+:10B30000000000000000000000000000000000003D
62199+:10B31000000000000000000000000000000000002D
62200+:10B32000000000000000000000000000000000001D
62201+:10B33000000000000000000000000000000000000D
62202+:10B3400000000000000000000000000000000000FD
62203+:10B3500000000000000000000000000000000000ED
62204+:10B3600000000000000000000000000000000000DD
62205+:10B3700000000000000000000000000000000000CD
62206+:10B3800000000000000000000000000000000000BD
62207+:10B3900000000000000000000000000000000000AD
62208+:10B3A000000000000000000000000000000000009D
62209+:10B3B000000000000000000000000000000000008D
62210+:10B3C000000000000000000000000000000000007D
62211+:10B3D000000000000000000000000000000000006D
62212+:10B3E000000000000000000000000000000000005D
62213+:10B3F000000000000000000000000000000000004D
62214+:10B40000000000000000000000000000000000003C
62215+:10B41000000000000000000000000000000000002C
62216+:10B42000000000000000000000000000000000001C
62217+:10B43000000000000000000000000000000000000C
62218+:10B4400000000000000000000000000000000000FC
62219+:10B4500000000000000000000000000000000000EC
62220+:10B4600000000000000000000000000000000000DC
62221+:10B4700000000000000000000000000000000000CC
62222+:10B4800000000000000000000000000000000000BC
62223+:10B4900000000000000000000000000000000000AC
62224+:10B4A000000000000000000000000000000000009C
62225+:10B4B000000000000000000000000000000000008C
62226+:10B4C000000000000000000000000000000000007C
62227+:10B4D000000000000000000000000000000000006C
62228+:10B4E000000000000000000000000000000000005C
62229+:10B4F000000000000000000000000000000000004C
62230+:10B50000000000000000000000000000000000003B
62231+:10B51000000000000000000000000000000000002B
62232+:10B52000000000000000000000000000000000001B
62233+:10B53000000000000000000000000000000000000B
62234+:10B5400000000000000000000000000000000000FB
62235+:10B5500000000000000000000000000000000000EB
62236+:10B5600000000000000000000000000000000000DB
62237+:10B5700000000000000000000000000000000000CB
62238+:10B5800000000000000000000000000000000000BB
62239+:10B5900000000000000000000000000000000000AB
62240+:10B5A000000000000000000000000000000000009B
62241+:10B5B000000000000000000000000000000000008B
62242+:10B5C000000000000000000000000000000000007B
62243+:10B5D000000000000000000000000000000000006B
62244+:10B5E000000000000000000000000000000000005B
62245+:10B5F000000000000000000000000000000000004B
62246+:10B60000000000000000000000000000000000003A
62247+:10B61000000000000000000000000000000000002A
62248+:10B62000000000000000000000000000000000001A
62249+:10B63000000000000000000000000000000000000A
62250+:10B6400000000000000000000000000000000000FA
62251+:10B6500000000000000000000000000000000000EA
62252+:10B6600000000000000000000000000000000000DA
62253+:10B6700000000000000000000000000000000000CA
62254+:10B6800000000000000000000000000000000000BA
62255+:10B6900000000000000000000000000000000000AA
62256+:10B6A000000000000000000000000000000000009A
62257+:10B6B000000000000000000000000000000000008A
62258+:10B6C000000000000000000000000000000000007A
62259+:10B6D000000000000000000000000000000000006A
62260+:10B6E000000000000000000000000000000000005A
62261+:10B6F000000000000000000000000000000000004A
62262+:10B700000000000000000000000000000000000039
62263+:10B710000000000000000000000000000000000029
62264+:10B720000000000000000000000000000000000019
62265+:10B730000000000000000000000000000000000009
62266+:10B7400000000000000000000000000000000000F9
62267+:10B7500000000000000000000000000000000000E9
62268+:10B7600000000000000000000000000000000000D9
62269+:10B7700000000000000000000000000000000000C9
62270+:10B7800000000000000000000000000000000000B9
62271+:10B7900000000000000000000000000000000000A9
62272+:10B7A0000000000000000000000000000000000099
62273+:10B7B0000000000000000000000000000000000089
62274+:10B7C0000000000000000000000000000000000079
62275+:10B7D0000000000000000000000000000000000069
62276+:10B7E0000000000000000000000000000000000059
62277+:10B7F0000000000000000000000000000000000049
62278+:10B800000000000000000000000000000000000038
62279+:10B810000000000000000000000000000000000028
62280+:10B820000000000000000000000000000000000018
62281+:10B830000000000000000000000000000000000008
62282+:10B8400000000000000000000000000000000000F8
62283+:10B8500000000000000000000000000000000000E8
62284+:10B8600000000000000000000000000000000000D8
62285+:10B8700000000000000000000000000000000000C8
62286+:10B8800000000000000000000000000000000000B8
62287+:10B8900000000000000000000000000000000000A8
62288+:10B8A0000000000000000000000000000000000098
62289+:10B8B0000000000000000000000000000000000088
62290+:10B8C0000000000000000000000000000000000078
62291+:10B8D0000000000000000000000000000000000068
62292+:10B8E0000000000000000000000000000000000058
62293+:10B8F0000000000000000000000000000000000048
62294+:10B900000000000000000000000000000000000037
62295+:10B910000000000000000000000000000000000027
62296+:10B920000000000000000000000000000000000017
62297+:10B930000000000000000000000000000000000007
62298+:10B9400000000000000000000000000000000000F7
62299+:10B9500000000000000000000000000000000000E7
62300+:10B9600000000000000000000000000000000000D7
62301+:10B9700000000000000000000000000000000000C7
62302+:10B9800000000000000000000000000000000000B7
62303+:10B9900000000000000000000000000000000000A7
62304+:10B9A0000000000000000000000000000000000097
62305+:10B9B0000000000000000000000000000000000087
62306+:10B9C0000000000000000000000000000000000077
62307+:10B9D0000000000000000000000000000000000067
62308+:10B9E0000000000000000000000000000000000057
62309+:10B9F0000000000000000000000000000000000047
62310+:10BA00000000000000000000000000000000000036
62311+:10BA10000000000000000000000000000000000026
62312+:10BA20000000000000000000000000000000000016
62313+:10BA30000000000000000000000000000000000006
62314+:10BA400000000000000000000000000000000000F6
62315+:10BA500000000000000000000000000000000000E6
62316+:10BA600000000000000000000000000000000000D6
62317+:10BA700000000000000000000000000000000000C6
62318+:10BA800000000000000000000000000000000000B6
62319+:10BA900000000000000000000000000000000000A6
62320+:10BAA0000000000000000000000000000000000096
62321+:10BAB0000000000000000000000000000000000086
62322+:10BAC0000000000000000000000000000000000076
62323+:10BAD0000000000000000000000000000000000066
62324+:10BAE0000000000000000000000000000000000056
62325+:10BAF0000000000000000000000000000000000046
62326+:10BB00000000000000000000000000000000000035
62327+:10BB10000000000000000000000000000000000025
62328+:10BB20000000000000000000000000000000000015
62329+:10BB30000000000000000000000000000000000005
62330+:10BB400000000000000000000000000000000000F5
62331+:10BB500000000000000000000000000000000000E5
62332+:10BB600000000000000000000000000000000000D5
62333+:10BB700000000000000000000000000000000000C5
62334+:10BB800000000000000000000000000000000000B5
62335+:10BB900000000000000000000000000000000000A5
62336+:10BBA0000000000000000000000000000000000095
62337+:10BBB0000000000000000000000000000000000085
62338+:10BBC0000000000000000000000000000000000075
62339+:10BBD0000000000000000000000000000000000065
62340+:10BBE0000000000000000000000000000000000055
62341+:10BBF0000000000000000000000000000000000045
62342+:10BC00000000000000000000000000000000000034
62343+:10BC10000000000000000000000000000000000024
62344+:10BC20000000000000000000000000000000000014
62345+:10BC30000000000000000000000000000000000004
62346+:10BC400000000000000000000000000000000000F4
62347+:10BC500000000000000000000000000000000000E4
62348+:10BC600000000000000000000000000000000000D4
62349+:10BC700000000000000000000000000000000000C4
62350+:10BC800000000000000000000000000000000000B4
62351+:10BC900000000000000000000000000000000000A4
62352+:10BCA0000000000000000000000000000000000094
62353+:10BCB0000000000000000000000000000000000084
62354+:10BCC0000000000000000000000000000000000074
62355+:10BCD0000000000000000000000000000000000064
62356+:10BCE0000000000000000000000000000000000054
62357+:10BCF0000000000000000000000000000000000044
62358+:10BD00000000000000000000000000000000000033
62359+:10BD10000000000000000000000000000000000023
62360+:10BD20000000000000000000000000000000000013
62361+:10BD30000000000000000000000000000000000003
62362+:10BD400000000000000000000000000000000000F3
62363+:10BD500000000000000000000000000000000000E3
62364+:10BD600000000000000000000000000000000000D3
62365+:10BD700000000000000000000000000000000000C3
62366+:10BD800000000000000000000000000000000000B3
62367+:10BD900000000000000000000000000000000000A3
62368+:10BDA0000000000000000000000000000000000093
62369+:10BDB0000000000000000000000000000000000083
62370+:10BDC0000000000000000000000000000000000073
62371+:10BDD0000000000000000000000000000000000063
62372+:10BDE0000000000000000000000000000000000053
62373+:10BDF0000000000000000000000000000000000043
62374+:10BE00000000000000000000000000000000000032
62375+:10BE10000000000000000000000000000000000022
62376+:10BE20000000000000000000000000000000000012
62377+:10BE30000000000000000000000000000000000002
62378+:10BE400000000000000000000000000000000000F2
62379+:10BE500000000000000000000000000000000000E2
62380+:10BE600000000000000000000000000000000000D2
62381+:10BE700000000000000000000000000000000000C2
62382+:10BE800000000000000000000000000000000000B2
62383+:10BE900000000000000000000000000000000000A2
62384+:10BEA0000000000000000000000000000000000092
62385+:10BEB0000000000000000000000000000000000082
62386+:10BEC0000000000000000000000000000000000072
62387+:10BED0000000000000000000000000000000000062
62388+:10BEE0000000000000000000000000000000000052
62389+:10BEF0000000000000000000000000000000000042
62390+:10BF00000000000000000000000000000000000031
62391+:10BF10000000000000000000000000000000000021
62392+:10BF20000000000000000000000000000000000011
62393+:10BF30000000000000000000000000000000000001
62394+:10BF400000000000000000000000000000000000F1
62395+:10BF500000000000000000000000000000000000E1
62396+:10BF600000000000000000000000000000000000D1
62397+:10BF700000000000000000000000000000000000C1
62398+:10BF800000000000000000000000000000000000B1
62399+:10BF900000000000000000000000000000000000A1
62400+:10BFA0000000000000000000000000000000000091
62401+:10BFB0000000000000000000000000000000000081
62402+:10BFC0000000000000000000000000000000000071
62403+:10BFD0000000000000000000000000000000000061
62404+:10BFE0000000000000000000000000000000000051
62405+:10BFF0000000000000000000000000000000000041
62406+:10C000000000000000000000000000000000000030
62407+:10C010000000000000000000000000000000000020
62408+:10C020000000000000000000000000000000000010
62409+:10C030000000000000000000000000000000000000
62410+:10C0400000000000000000000000000000000000F0
62411+:10C0500000000000000000000000000000000000E0
62412+:10C0600000000000000000000000000000000000D0
62413+:10C0700000000000000000000000000000000000C0
62414+:10C0800000000000000000000000000000000000B0
62415+:10C0900000000000000000000000000000000000A0
62416+:10C0A0000000000000000000000000000000000090
62417+:10C0B0000000000000000000000000000000000080
62418+:10C0C0000000000000000000000000000000000070
62419+:10C0D0000000000000000000000000000000000060
62420+:10C0E0000000000000000000000000000000000050
62421+:10C0F0000000000000000000000000000000000040
62422+:10C10000000000000000000000000000000000002F
62423+:10C11000000000000000000000000000000000001F
62424+:10C12000000000000000000000000000000000000F
62425+:10C1300000000000000000000000000000000000FF
62426+:10C1400000000000000000000000000000000000EF
62427+:10C1500000000000000000000000000000000000DF
62428+:10C1600000000000000000000000000000000000CF
62429+:10C1700000000000000000000000000000000000BF
62430+:10C1800000000000000000000000000000000000AF
62431+:10C19000000000000000000000000000000000009F
62432+:10C1A000000000000000000000000000000000008F
62433+:10C1B000000000000000000000000000000000007F
62434+:10C1C000000000000000000000000000000000006F
62435+:10C1D000000000000000000000000000000000005F
62436+:10C1E000000000000000000000000000000000004F
62437+:10C1F000000000000000000000000000000000003F
62438+:10C20000000000000000000000000000000000002E
62439+:10C21000000000000000000000000000000000001E
62440+:10C22000000000000000000000000000000000000E
62441+:10C2300000000000000000000000000000000000FE
62442+:10C2400000000000000000000000000000000000EE
62443+:10C2500000000000000000000000000000000000DE
62444+:10C2600000000000000000000000000000000000CE
62445+:10C2700000000000000000000000000000000000BE
62446+:10C2800000000000000000000000000000000000AE
62447+:10C29000000000000000000000000000000000009E
62448+:10C2A000000000000000000000000000000000008E
62449+:10C2B000000000000000000000000000000000007E
62450+:10C2C000000000000000000000000000000000006E
62451+:10C2D000000000000000000000000000000000005E
62452+:10C2E000000000000000000000000000000000004E
62453+:10C2F000000000000000000000000000000000003E
62454+:10C30000000000000000000000000000000000002D
62455+:10C31000000000000000000000000000000000001D
62456+:10C32000000000000000000000000000000000000D
62457+:10C3300000000000000000000000000000000000FD
62458+:10C3400000000000000000000000000000000000ED
62459+:10C3500000000000000000000000000000000000DD
62460+:10C3600000000000000000000000000000000000CD
62461+:10C3700000000000000000000000000000000000BD
62462+:10C3800000000000000000000000000000000000AD
62463+:10C39000000000000000000000000000000000009D
62464+:10C3A000000000000000000000000000000000008D
62465+:10C3B000000000000000000000000000000000007D
62466+:10C3C000000000000000000000000000000000006D
62467+:10C3D000000000000000000000000000000000005D
62468+:10C3E000000000000000000000000000000000004D
62469+:10C3F000000000000000000000000000000000003D
62470+:10C40000000000000000000000000000000000002C
62471+:10C41000000000000000000000000000000000001C
62472+:10C42000000000000000000000000000000000000C
62473+:10C4300000000000000000000000000000000000FC
62474+:10C4400000000000000000000000000000000000EC
62475+:10C4500000000000000000000000000000000000DC
62476+:10C4600000000000000000000000000000000000CC
62477+:10C4700000000000000000000000000000000000BC
62478+:10C4800000000000000000000000000000000000AC
62479+:10C49000000000000000000000000000000000009C
62480+:10C4A000000000000000000000000000000000008C
62481+:10C4B000000000000000000000000000000000007C
62482+:10C4C000000000000000000000000000000000006C
62483+:10C4D000000000000000000000000000000000005C
62484+:10C4E000000000000000000000000000000000004C
62485+:10C4F000000000000000000000000000000000003C
62486+:10C50000000000000000000000000000000000002B
62487+:10C51000000000000000000000000000000000001B
62488+:10C52000000000000000000000000000000000000B
62489+:10C5300000000000000000000000000000000000FB
62490+:10C5400000000000000000000000000000000000EB
62491+:10C5500000000000000000000000000000000000DB
62492+:10C5600000000000000000000000000000000000CB
62493+:10C5700000000000000000000000000000000000BB
62494+:10C5800000000000000000000000000000000000AB
62495+:10C59000000000000000000000000000000000009B
62496+:10C5A000000000000000000000000000000000008B
62497+:10C5B000000000000000000000000000000000007B
62498+:10C5C000000000000000000000000000000000006B
62499+:10C5D000000000000000000000000000000000005B
62500+:10C5E000000000000000000000000000000000004B
62501+:10C5F000000000000000000000000000000000003B
62502+:10C60000000000000000000000000000000000002A
62503+:10C61000000000000000000000000000000000001A
62504+:10C62000000000000000000000000000000000000A
62505+:10C6300000000000000000000000000000000000FA
62506+:10C6400000000000000000000000000000000000EA
62507+:10C6500000000000000000000000000000000000DA
62508+:10C6600000000000000000000000000000000000CA
62509+:10C6700000000000000000000000000000000000BA
62510+:10C6800000000000000000000000000000000000AA
62511+:10C69000000000000000000000000000000000009A
62512+:10C6A000000000000000000000000000000000008A
62513+:10C6B000000000000000000000000000000000007A
62514+:10C6C000000000000000000000000000000000006A
62515+:10C6D000000000000000000000000000000000005A
62516+:10C6E000000000000000000000000000000000004A
62517+:10C6F000000000000000000000000000000000003A
62518+:10C700000000000000000000000000000000000029
62519+:10C710000000000000000000000000000000000019
62520+:10C720000000000000000000000000000000000009
62521+:10C7300000000000000000000000000000000000F9
62522+:10C7400000000000000000000000000000000000E9
62523+:10C7500000000000000000000000000000000000D9
62524+:10C7600000000000000000000000000000000000C9
62525+:10C7700000000000000000000000000000000000B9
62526+:10C7800000000000000000000000000000000000A9
62527+:10C790000000000000000000000000000000000099
62528+:10C7A0000000000000000000000000000000000089
62529+:10C7B0000000000000000000000000000000000079
62530+:10C7C0000000000000000000000000000000000069
62531+:10C7D0000000000000000000000000000000000059
62532+:10C7E0000000000000000000000000000000000049
62533+:10C7F0000000000000000000000000000000000039
62534+:10C800000000000000000000000000000000000028
62535+:10C810000000000000000000000000000000000018
62536+:10C820000000000000000000000000000000000008
62537+:10C8300000000000000000000000000000000000F8
62538+:10C8400000000000000000000000000000000000E8
62539+:10C8500000000000000000000000000000000000D8
62540+:10C8600000000000000000000000000000000000C8
62541+:10C8700000000000000000000000000000000000B8
62542+:10C8800000000000000000000000000000000000A8
62543+:10C890000000000000000000000000000000000098
62544+:10C8A0000000000000000000000000000000000088
62545+:10C8B0000000000000000000000000000000000078
62546+:10C8C0000000000000000000000000000000000068
62547+:10C8D0000000000000000000000000000000000058
62548+:10C8E0000000000000000000000000000000000048
62549+:10C8F0000000000000000000000000000000000038
62550+:10C900000000000000000000000000000000000027
62551+:10C910000000000000000000000000000000000017
62552+:10C920000000000000000000000000000000000007
62553+:10C9300000000000000000000000000000000000F7
62554+:10C9400000000000000000000000000000000000E7
62555+:10C9500000000000000000000000000000000000D7
62556+:10C9600000000000000000000000000000000000C7
62557+:10C9700000000000000000000000000000000000B7
62558+:10C9800000000000000000000000000000000000A7
62559+:10C990000000000000000000000000000000000097
62560+:10C9A0000000000000000000000000000000000087
62561+:10C9B0000000000000000000000000000000000077
62562+:10C9C0000000000000000000000000000000000067
62563+:10C9D0000000000000000000000000000000000057
62564+:10C9E0000000000000000000000000000000000047
62565+:10C9F0000000000000000000000000000000000037
62566+:10CA00000000000000000000000000000000000026
62567+:10CA10000000000000000000000000000000000016
62568+:10CA20000000000000000000000000000000000006
62569+:10CA300000000000000000000000000000000000F6
62570+:10CA400000000000000000000000000000000000E6
62571+:10CA500000000000000000000000000000000000D6
62572+:10CA600000000000000000000000000000000000C6
62573+:10CA700000000000000000000000000000000000B6
62574+:10CA800000000000000000000000000000000000A6
62575+:10CA90000000000000000000000000000000000096
62576+:10CAA0000000000000000000000000000000000086
62577+:10CAB0000000000000000000000000000000000076
62578+:10CAC0000000000000000000000000000000000066
62579+:10CAD0000000000000000000000000000000000056
62580+:10CAE0000000000000000000000000000000000046
62581+:10CAF0000000000000000000000000000000000036
62582+:10CB00000000000000000000000000000000000025
62583+:10CB10000000000000000000000000000000000015
62584+:10CB20000000000000000000000000000000000005
62585+:10CB300000000000000000000000000000000000F5
62586+:10CB400000000000000000000000000000000000E5
62587+:10CB500000000000000000000000000000000000D5
62588+:10CB600000000000000000000000000000000000C5
62589+:10CB700000000000000000000000000000000000B5
62590+:10CB800000000000000000000000000000000000A5
62591+:10CB90000000000000000000000000000000000095
62592+:10CBA0000000000000000000000000000000000085
62593+:10CBB0000000000000000000000000000000000075
62594+:10CBC0000000000000000000000000000000000065
62595+:10CBD0000000000000000000000000000000000055
62596+:10CBE0000000000000000000000000000000000045
62597+:10CBF0000000000000000000000000000000000035
62598+:10CC00000000000000000000000000000000000024
62599+:10CC10000000000000000000000000000000000014
62600+:10CC20000000000000000000000000000000000004
62601+:10CC300000000000000000000000000000000000F4
62602+:10CC400000000000000000000000000000000000E4
62603+:10CC500000000000000000000000000000000000D4
62604+:10CC600000000000000000000000000000000000C4
62605+:10CC700000000000000000000000000000000000B4
62606+:10CC800000000000000000000000000000000000A4
62607+:10CC90000000000000000000000000000000000094
62608+:10CCA0000000000000000000000000000000000084
62609+:10CCB0000000000000000000000000000000000074
62610+:10CCC0000000000000000000000000000000000064
62611+:10CCD0000000000000000000000000000000000054
62612+:10CCE0000000000000000000000000000000000044
62613+:10CCF0000000000000000000000000000000000034
62614+:10CD00000000000000000000000000000000000023
62615+:10CD10000000000000000000000000000000000013
62616+:10CD20000000000000000000000000000000000003
62617+:10CD300000000000000000000000000000000000F3
62618+:10CD400000000000000000000000000000000000E3
62619+:10CD500000000000000000000000000000000000D3
62620+:10CD600000000000000000000000000000000000C3
62621+:10CD700000000000000000000000000000000000B3
62622+:10CD800000000000000000000000000000000000A3
62623+:10CD90000000000000000000000000000000000093
62624+:10CDA0000000000000000000000000000000000083
62625+:10CDB0000000000000000000000000000000000073
62626+:10CDC0000000000000000000000000000000000063
62627+:10CDD0000000000000000000000000000000000053
62628+:10CDE0000000000000000000000000000000000043
62629+:10CDF0000000000000000000000000000000000033
62630+:10CE00000000000000000000000000000000000022
62631+:10CE10000000000000000000000000000000000012
62632+:10CE20000000000000000000000000000000000002
62633+:10CE300000000000000000000000000000000000F2
62634+:10CE400000000000000000000000000000000000E2
62635+:10CE500000000000000000000000000000000000D2
62636+:10CE600000000000000000000000000000000000C2
62637+:10CE700000000000000000000000000000000000B2
62638+:10CE800000000000000000000000000000000000A2
62639+:10CE90000000000000000000000000000000000092
62640+:10CEA0000000000000000000000000000000000082
62641+:10CEB0000000000000000000000000000000000072
62642+:10CEC0000000000000000000000000000000000062
62643+:10CED0000000000000000000000000000000000052
62644+:10CEE0000000000000000000000000000000000042
62645+:10CEF0000000000000000000000000000000000032
62646+:10CF00000000000000000000000000000000000021
62647+:10CF10000000000000000000000000000000000011
62648+:10CF20000000000000000000000000000000000001
62649+:10CF300000000000000000000000000000000000F1
62650+:10CF400000000000000000000000000000000000E1
62651+:10CF500000000000000000000000000000000000D1
62652+:10CF600000000000000000000000000000000000C1
62653+:10CF700000000000000000000000000000000000B1
62654+:10CF800000000000000000000000000000000000A1
62655+:10CF90000000000000000000000000000000000091
62656+:10CFA0000000000000000000000000000000000081
62657+:10CFB0000000000000000000000000000000000071
62658+:10CFC0000000000000000000000000000000000061
62659+:10CFD0000000000000000000000000000000000051
62660+:10CFE0000000000000000000000000000000000041
62661+:10CFF0000000000000000000000000000000000031
62662+:10D000000000000000000000000000000000000020
62663+:10D010000000000000000000000000000000000010
62664+:10D020000000000000000000000000000000000000
62665+:10D0300000000000000000000000000000000000F0
62666+:10D0400000000000000000000000000000000000E0
62667+:10D0500000000000000000000000000000000000D0
62668+:10D0600000000000000000000000000000000000C0
62669+:10D0700000000000000000000000000000000000B0
62670+:10D0800000000000000000000000000000000000A0
62671+:10D090000000000000000000000000000000000090
62672+:10D0A0000000000000000000000000000000000080
62673+:10D0B0000000000000000000000000000000000070
62674+:10D0C0000000000000000000000000000000000060
62675+:10D0D0000000000000000000000000000000000050
62676+:10D0E0000000000000000000000000000000000040
62677+:10D0F0000000000000000000000000000000000030
62678+:10D10000000000000000000000000000000000001F
62679+:10D11000000000000000000000000000000000000F
62680+:10D1200000000000000000000000000000000000FF
62681+:10D1300000000000000000000000000000000000EF
62682+:10D1400000000000000000000000000000000000DF
62683+:10D1500000000000000000000000000000000000CF
62684+:10D1600000000000000000000000000000000000BF
62685+:10D1700000000000000000000000000000000000AF
62686+:10D18000000000000000000000000000000000009F
62687+:10D19000000000000000000000000000000000008F
62688+:10D1A000000000000000000000000000000000007F
62689+:10D1B000000000000000000000000000000000006F
62690+:10D1C000000000000000000000000000000000005F
62691+:10D1D000000000000000000000000000000000004F
62692+:10D1E000000000000000000000000000000000003F
62693+:10D1F000000000000000000000000000000000002F
62694+:10D20000000000000000000000000000000000001E
62695+:10D21000000000000000000000000000000000000E
62696+:10D2200000000000000000000000000000000000FE
62697+:10D2300000000000000000000000000000000000EE
62698+:10D2400000000000000000000000000000000000DE
62699+:10D2500000000000000000000000000000000000CE
62700+:10D2600000000000000000000000000000000000BE
62701+:10D2700000000000000000000000000000000000AE
62702+:10D28000000000000000000000000000000000009E
62703+:10D29000000000000000000000000000000000008E
62704+:10D2A000000000000000000000000000000000007E
62705+:10D2B000000000000000000000000000000000006E
62706+:10D2C000000000000000000000000000000000005E
62707+:10D2D000000000000000000000000000000000004E
62708+:10D2E000000000000000000000000000000000003E
62709+:10D2F000000000000000000000000000000000002E
62710+:10D30000000000000000000000000000000000001D
62711+:10D31000000000000000000000000000000000000D
62712+:10D3200000000000000000000000000000000000FD
62713+:10D3300000000000000000000000000000000000ED
62714+:10D3400000000000000000000000000000000000DD
62715+:10D3500000000000000000000000000000000000CD
62716+:10D3600000000000000000000000000000000000BD
62717+:10D3700000000000000000000000000000000000AD
62718+:10D38000000000000000000000000000000000009D
62719+:10D39000000000000000000000000000000000008D
62720+:10D3A000000000000000000000000000000000007D
62721+:10D3B000000000000000000000000000000000006D
62722+:10D3C000000000000000000000000000000000005D
62723+:10D3D000000000000000000000000000000000004D
62724+:10D3E000000000000000000000000000000000003D
62725+:10D3F000000000000000000000000000000000002D
62726+:10D40000000000000000000000000000000000001C
62727+:10D41000000000000000000000000000000000000C
62728+:10D4200000000000000000000000000000000000FC
62729+:10D4300000000000000000000000000000000000EC
62730+:10D4400000000000000000000000000000000000DC
62731+:10D4500000000000000000000000000000000000CC
62732+:10D4600000000000000000000000000000000000BC
62733+:10D4700000000000000000000000000000000000AC
62734+:10D48000000000000000000000000000000000009C
62735+:10D49000000000000000000000000000000000008C
62736+:10D4A000000000000000000000000000000000007C
62737+:10D4B000000000000000000000000000000000006C
62738+:10D4C000000000000000000000000000000000005C
62739+:10D4D000000000000000000000000000000000004C
62740+:10D4E000000000000000000000000000000000003C
62741+:10D4F000000000000000000000000000000000002C
62742+:10D50000000000000000000000000000000000001B
62743+:10D51000000000000000000000000000000000000B
62744+:10D5200000000000000000000000000000000000FB
62745+:10D5300000000000000000000000000000000000EB
62746+:10D5400000000000000000000000000000000000DB
62747+:10D5500000000000000000000000000000000000CB
62748+:10D5600000000000000000000000000000000000BB
62749+:10D5700000000000000000000000000000000000AB
62750+:10D58000000000000000000000000000000000009B
62751+:10D59000000000000000000000000000000000008B
62752+:10D5A000000000000000000000000000000000007B
62753+:10D5B000000000000000000000000000000000006B
62754+:10D5C000000000000000000000000000000000005B
62755+:10D5D000000000000000000000000000000000004B
62756+:10D5E000000000000000000000000000000000003B
62757+:10D5F000000000000000000000000000000000002B
62758+:10D60000000000000000000000000000000000001A
62759+:10D61000000000000000000000000000000000000A
62760+:10D6200000000000000000000000000000000000FA
62761+:10D6300000000000000000000000000000000000EA
62762+:10D6400000000000000000000000000000000000DA
62763+:10D6500000000000000000000000000000000000CA
62764+:10D6600000000000000000000000000000000000BA
62765+:10D6700000000000000000000000000000000000AA
62766+:10D68000000000000000000000000000000000009A
62767+:10D69000000000000000000000000000000000008A
62768+:10D6A000000000000000000000000000000000007A
62769+:10D6B000000000000000000000000000000000006A
62770+:10D6C000000000000000000000000000000000005A
62771+:10D6D000000000000000000000000000000000004A
62772+:10D6E000000000000000000000000000000000003A
62773+:10D6F000000000000000000000000000000000002A
62774+:10D700000000000000000000000000000000000019
62775+:10D710000000000000000000000000000000000009
62776+:10D7200000000000000000000000000000000000F9
62777+:10D7300000000000000000000000000000000000E9
62778+:10D7400000000000000000000000000000000000D9
62779+:10D7500000000000000000000000000000000000C9
62780+:10D7600000000000000000000000000000000000B9
62781+:10D7700000000000000000000000000000000000A9
62782+:10D780000000000000000000000000000000000099
62783+:10D790000000000000000000000000000000000089
62784+:10D7A0000000000000000000000000000000000079
62785+:10D7B0000000000000000000000000000000000069
62786+:10D7C0000000000000000000000000000000000059
62787+:10D7D0000000000000000000000000000000000049
62788+:10D7E0000000000000000000000000000000000039
62789+:10D7F0000000000000000000000000000000000029
62790+:10D800000000000000000000000000000000000018
62791+:10D810000000000000000000000000000000000008
62792+:10D8200000000000000000000000000000000000F8
62793+:10D8300000000000000000000000000000000000E8
62794+:10D8400000000000000000000000000000000000D8
62795+:10D8500000000000000000000000000000000000C8
62796+:10D8600000000000000000000000000000000000B8
62797+:10D8700000000000000000000000000000000000A8
62798+:10D880000000000000000000000000000000000098
62799+:10D890000000000000000000000000000000000088
62800+:10D8A0000000000000000000000000000000000078
62801+:10D8B0000000000000000000000000000000000068
62802+:10D8C0000000000000000000000000000000000058
62803+:10D8D0000000000000000000000000000000000048
62804+:10D8E0000000000000000000000000000000000038
62805+:10D8F0000000000000000000000000000000000028
62806+:10D900000000000000000000000000000000000017
62807+:10D910000000000000000000000000000000000007
62808+:10D9200000000000000000000000000000000000F7
62809+:10D9300000000000000000000000000000000000E7
62810+:10D9400000000000000000000000000000000000D7
62811+:10D9500000000000000000000000000000000000C7
62812+:10D9600000000000000000000000000000000000B7
62813+:10D9700000000000000000000000000000000000A7
62814+:10D980000000000000000000000000000000000097
62815+:10D990000000000000000000000000000000000087
62816+:10D9A0000000000000000000000000000000000077
62817+:10D9B0000000000000000000000000000000000067
62818+:10D9C0000000000000000000000000000000000057
62819+:10D9D0000000000000000000000000000000000047
62820+:10D9E0000000000000000000000000000000000037
62821+:10D9F0000000000000000000000000000000000027
62822+:10DA00000000000000000000000000000000000016
62823+:10DA10000000000000000000000000000000000006
62824+:10DA200000000000000000000000000000000000F6
62825+:10DA300000000000000000000000000000000000E6
62826+:10DA400000000000000000000000000000000000D6
62827+:10DA500000000000000000000000000000000000C6
62828+:10DA600000000000000000000000000000000000B6
62829+:10DA700000000000000000000000000000000000A6
62830+:10DA80000000000000000000000000000000000096
62831+:10DA90000000000000000000000000000000000086
62832+:10DAA0000000000000000000000000000000000076
62833+:10DAB0000000000000000000000000000000000066
62834+:10DAC0000000000000000000000000000000000056
62835+:10DAD0000000000000000000000000000000000046
62836+:10DAE0000000000000000000000000000000000036
62837+:10DAF0000000000000000000000000000000000026
62838+:10DB00000000000000000000000000000000000015
62839+:10DB10000000000000000000000000000000000005
62840+:10DB200000000000000000000000000000000000F5
62841+:10DB300000000000000000000000000000000000E5
62842+:10DB400000000000000000000000000000000000D5
62843+:10DB500000000000000000000000000000000000C5
62844+:10DB600000000000000000000000000000000000B5
62845+:10DB700000000000000000000000000000000000A5
62846+:10DB80000000000000000000000000000000000095
62847+:10DB90000000000000000000000000000000000085
62848+:10DBA0000000000000000000000000000000000075
62849+:10DBB0000000000000000000000000000000000065
62850+:10DBC0000000000000000000000000000000000055
62851+:10DBD0000000000000000000000000000000000045
62852+:10DBE0000000000000000000000000000000000035
62853+:10DBF0000000000000000000000000000000000025
62854+:10DC00000000000000000000000000000000000014
62855+:10DC10000000000000000000000000000000000004
62856+:10DC200000000000000000000000000000000000F4
62857+:10DC300000000000000000000000000000000000E4
62858+:10DC400000000000000000000000000000000000D4
62859+:10DC500000000000000000000000000000000000C4
62860+:10DC600000000000000000000000000000000000B4
62861+:10DC700000000000000000000000000000000000A4
62862+:10DC80000000000000000000000000000000000094
62863+:10DC90000000000000000000000000000000000084
62864+:10DCA0000000000000000000000000000000000074
62865+:10DCB0000000000000000000000000000000000064
62866+:10DCC0000000000000000000000000000000000054
62867+:10DCD0000000000000000000000000000000000044
62868+:10DCE0000000000000000000000000000000000034
62869+:10DCF0000000000000000000000000000000000024
62870+:10DD00000000000000000000000000000000000013
62871+:10DD10000000000000000000000000000000000003
62872+:10DD200000000000000000000000000000000000F3
62873+:10DD300000000000000000000000000000000000E3
62874+:10DD400000000000000000000000000000000000D3
62875+:10DD500000000000000000000000000000000000C3
62876+:10DD600000000000000000000000000000000000B3
62877+:10DD700000000000000000000000000000000000A3
62878+:10DD80000000000000000000000000000000000093
62879+:10DD90000000000000000000000000000000000083
62880+:10DDA0000000000000000000000000000000000073
62881+:10DDB0000000000000000000000000000000000063
62882+:10DDC0000000000000000000000000000000000053
62883+:10DDD0000000000000000000000000000000000043
62884+:10DDE0000000000000000000000000000000000033
62885+:10DDF0000000000000000000000000000000000023
62886+:10DE00000000000000000000000000000000000012
62887+:10DE10000000000000000000000000000000000002
62888+:10DE200000000000000000000000000000000000F2
62889+:10DE300000000000000000000000000000000000E2
62890+:10DE400000000000000000000000000000000000D2
62891+:10DE500000000000000000000000000000000000C2
62892+:10DE600000000000000000000000000000000000B2
62893+:10DE700000000000000000000000000000000000A2
62894+:10DE80000000000000000000000000000000000092
62895+:10DE90000000000000000000000000000000000082
62896+:10DEA0000000000000000000000000000000000072
62897+:10DEB0000000000000000000000000000000000062
62898+:10DEC0000000000000000000000000000000000052
62899+:10DED0000000000000000000000000000000000042
62900+:10DEE0000000000000000000000000000000000032
62901+:10DEF0000000000000000000000000000000000022
62902+:10DF00000000000000000000000000000000000011
62903+:10DF10000000000000000000000000000000000001
62904+:10DF200000000000000000000000000000000000F1
62905+:10DF300000000000000000000000000000000000E1
62906+:10DF400000000000000000000000000000000000D1
62907+:10DF500000000000000000000000000000000000C1
62908+:10DF600000000000000000000000000000000000B1
62909+:10DF700000000000000000000000000000000000A1
62910+:10DF80000000000000000000000000000000000091
62911+:10DF90000000000000000000000000000000000081
62912+:10DFA0000000000000000000000000000000000071
62913+:10DFB0000000000000000000000000000000000061
62914+:10DFC0000000000000000000000000000000000051
62915+:10DFD0000000000000000000000000000000000041
62916+:10DFE0000000000000000000000000000000000031
62917+:10DFF0000000000000000000000000000000000021
62918+:10E000000000000000000000000000000000000010
62919+:10E010000000000000000000000000000000000000
62920+:10E0200000000000000000000000000000000000F0
62921+:10E0300000000000000000000000000000000000E0
62922+:10E0400000000000000000000000000000000000D0
62923+:10E0500000000000000000000000000000000000C0
62924+:10E0600000000000000000000000000000000000B0
62925+:10E0700000000000000000000000000000000000A0
62926+:10E080000000000000000000000000000000000090
62927+:10E090000000000000000000000000000000000080
62928+:10E0A0000000000000000000000000000000000070
62929+:10E0B0000000000000000000000000000000000060
62930+:10E0C0000000000000000000000000000000000050
62931+:10E0D0000000000000000000000000000000000040
62932+:10E0E0000000000000000000000000000000000030
62933+:10E0F0000000000000000000000000000000000020
62934+:10E10000000000000000000000000000000000000F
62935+:10E1100000000000000000000000000000000000FF
62936+:10E1200000000000000000000000000000000000EF
62937+:10E1300000000000000000000000000000000000DF
62938+:10E1400000000000000000000000000000000000CF
62939+:10E1500000000000000000000000000000000000BF
62940+:10E1600000000000000000000000000000000000AF
62941+:10E17000000000000000000000000000000000009F
62942+:10E18000000000000000000000000000000000008F
62943+:10E19000000000000000000000000000000000007F
62944+:10E1A000000000000000000000000000000000006F
62945+:10E1B000000000000000000000000000000000005F
62946+:10E1C000000000000000000000000000000000004F
62947+:10E1D000000000000000000000000000000000003F
62948+:10E1E000000000000000000000000000000000002F
62949+:10E1F000000000000000000000000000000000809F
62950+:10E20000000000000000000000000000000000000E
62951+:10E2100000000000000000000000000000000000FE
62952+:10E220000000000A000000000000000000000000E4
62953+:10E2300010000003000000000000000D0000000DB1
62954+:10E240003C020801244295C03C030801246397FC6A
62955+:10E25000AC4000000043202B1480FFFD244200044A
62956+:10E260003C1D080037BD9FFC03A0F0213C100800B6
62957+:10E27000261032103C1C0801279C95C00E0012BECF
62958+:10E28000000000000000000D3C02800030A5FFFFF0
62959+:10E2900030C600FF344301803C0880008D0901B87E
62960+:10E2A0000520FFFE00000000AC6400002404000212
62961+:10E2B000A4650008A066000AA064000BAC67001803
62962+:10E2C0003C03100003E00008AD0301B83C0560000A
62963+:10E2D0008CA24FF80440FFFE00000000ACA44FC029
62964+:10E2E0003C0310003C040200ACA44FC403E000084F
62965+:10E2F000ACA34FF89486000C00A050212488001491
62966+:10E3000000062B0200051080004448210109182B4B
62967+:10E310001060001100000000910300002C6400094F
62968+:10E320005080000991190001000360803C0D080134
62969+:10E3300025AD9258018D58218D67000000E000083E
62970+:10E340000000000091190001011940210109302B42
62971+:10E3500054C0FFF29103000003E000080000102108
62972+:10E360000A000CCC25080001910F0001240E000AC0
62973+:10E3700015EE00400128C8232F38000A1700003D81
62974+:10E38000250D00028D580000250F0006370E0100F4
62975+:10E39000AD4E0000910C000291AB000191A400026F
62976+:10E3A00091A60003000C2E00000B3C0000A71025D6
62977+:10E3B00000041A000043C8250326C025AD580004F8
62978+:10E3C000910E000691ED000191E7000291E5000336
62979+:10E3D000000E5E00000D6400016C30250007220075
62980+:10E3E00000C41025004518252508000A0A000CCC99
62981+:10E3F000AD430008910F000125040002240800022B
62982+:10E4000055E80001012020210A000CCC00804021A9
62983+:10E41000910C0001240B0003158B00160000000076
62984+:10E420008D580000910E000225080003370D0008EA
62985+:10E43000A14E00100A000CCCAD4D00009119000156
62986+:10E44000240F0004172F000B0000000091070002AA
62987+:10E45000910400038D43000000072A0000A410254A
62988+:10E460003466000425080004AD42000C0A000CCC00
62989+:10E47000AD46000003E000082402000127BDFFE8CC
62990+:10E48000AFBF0014AFB000100E00164E0080802108
62991+:10E490003C0480083485008090A600052403FFFE1C
62992+:10E4A0000200202100C310248FBF00148FB0001081
62993+:10E4B000A0A200050A00165827BD001827BDFFE8D6
62994+:10E4C000AFB00010AFBF00140E000FD40080802149
62995+:10E4D0003C06800834C5008090A40000240200504F
62996+:10E4E000308300FF106200073C09800002002021F9
62997+:10E4F0008FBF00148FB00010AD2001800A00108F74
62998+:10E5000027BD0018240801003C07800002002021DC
62999+:10E510008FBF00148FB00010ACE801800A00108F8C
63000+:10E5200027BD001827BDFF783C058008AFBE0080DE
63001+:10E53000AFB7007CAFB3006CAFB10064AFBF008475
63002+:10E54000AFB60078AFB50074AFB40070AFB200687A
63003+:10E55000AFB0006034A600803C0580008CB201287A
63004+:10E5600090C400098CA701043C020001309100FF17
63005+:10E5700000E218240000B8210000F021106000071C
63006+:10E58000000098213C0908008D2931F02413000176
63007+:10E59000252800013C010800AC2831F0ACA0008423
63008+:10E5A00090CC0005000C5827316A0001154000721C
63009+:10E5B000AFA0005090CD00002406002031A400FF41
63010+:10E5C00010860018240E0050108E009300000000EA
63011+:10E5D0003C1008008E1000DC260F00013C010800F2
63012+:10E5E000AC2F00DC0E0016C7000000000040182110
63013+:10E5F0008FBF00848FBE00808FB7007C8FB60078FD
63014+:10E600008FB500748FB400708FB3006C8FB2006848
63015+:10E610008FB100648FB000600060102103E000083B
63016+:10E6200027BD00880000000D3C1F8000AFA0003017
63017+:10E6300097E501168FE201043C04002030B9FFFF8A
63018+:10E64000004438240007182B00033140AFA60030E7
63019+:10E650008FF5010437F80C003C1600400338802188
63020+:10E6600002B6A02434C40040128000479215000D69
63021+:10E6700032A800201500000234860080008030217E
63022+:10E6800014C0009FAFA600303C0D800835A6008066
63023+:10E6900090CC0008318B0040516000063C06800899
63024+:10E6A000240E0004122E00A8240F0012122F003294
63025+:10E6B0003C06800834C401003C0280009447011AE3
63026+:10E6C0009619000E909F00088E18000830E3FFFF97
63027+:10E6D00003F9B00432B40004AFB6005CAFA3005835
63028+:10E6E0008E1600041280002EAFB8005434C3008090
63029+:10E6F000906800083105004014A0002500000000CB
63030+:10E700008C70005002D090230640000500000000ED
63031+:10E710008C71003402D1A82306A201678EE20008A2
63032+:10E72000126000063C1280003C1508008EB531F4E2
63033+:10E7300026B600013C010800AC3631F4AE4000447E
63034+:10E74000240300018FBF00848FBE00808FB7007C40
63035+:10E750008FB600788FB500748FB400708FB3006CE3
63036+:10E760008FB200688FB100648FB00060006010212C
63037+:10E7700003E0000827BD00880E000D2800002021BE
63038+:10E780000A000D75004018210A000D9500C02021D7
63039+:10E790000E00171702C020211440FFE10000000006
63040+:10E7A0003C0B8008356400808C8A003402CA482300
63041+:10E7B0000520001D000000003C1E08008FDE310017
63042+:10E7C00027D700013C010800AC3731001260000679
63043+:10E7D000024020213C1408008E9431F42690000160
63044+:10E7E0003C010800AC3031F40E00164E3C1E80088F
63045+:10E7F00037CD008091B700250240202136EE00047D
63046+:10E800000E001658A1AE00250E000CAC02402021CF
63047+:10E810000A000DCA240300013C17080126F796C020
63048+:10E820000A000D843C1F80008C86003002C66023E5
63049+:10E830001980000C2419000C908F004F3C14080024
63050+:10E840008E94310032B500FC35ED0001268E0001BA
63051+:10E850003C010800AC2E3100A08D004FAFA0005845
63052+:10E860002419000CAFB900308C9800300316A02397
63053+:10E870001A80010B8FA300580074F82A17E0FFD309
63054+:10E88000000000001074002A8FA5005802D4B021A7
63055+:10E8900000B410233044FFFFAFA4005832A8000298
63056+:10E8A0001100002E32AB00103C15800836B00080FD
63057+:10E8B0009216000832D30040526000FB8EE200083E
63058+:10E8C0000E00164E02402021240A0018A20A000958
63059+:10E8D000921100052409FFFE024020210229902404
63060+:10E8E0000E001658A2120005240400390000282149
63061+:10E8F0000E0016F2240600180A000DCA24030001B7
63062+:10E9000092FE000C3C0A800835490080001EBB00C6
63063+:10E910008D27003836F10081024020213225F08118
63064+:10E920000E000C9B30C600FF0A000DC10000000065
63065+:10E930003AA7000130E300011460FFA402D4B02123
63066+:10E940000A000E1D00000000024020210E001734B6
63067+:10E95000020028210A000D75004018211160FF7087
63068+:10E960003C0F80083C0D800835EE00808DC40038D7
63069+:10E970008FA300548DA60004006660231D80FF68ED
63070+:10E98000000000000064C02307020001AFA400548F
63071+:10E990003C1F08008FFF31E433F9000113200015FC
63072+:10E9A0008FAC00583C07800094E3011A10600012FD
63073+:10E9B0003C0680080E00216A024020213C03080129
63074+:10E9C000906396F13064000214800145000000005D
63075+:10E9D000306C0004118000078FAC0058306600FBDB
63076+:10E9E0003C010801A02696F132B500FCAFA000580A
63077+:10E9F0008FAC00583C06800834D30080AFB40018B8
63078+:10EA0000AFB60010AFAC00143C088000950B01209D
63079+:10EA10008E6F0030966A005C8FA3005C8FBF003061
63080+:10EA20003169FFFF3144FFFF8FAE005401341021E4
63081+:10EA3000350540000064382B0045C82103E7C02598
63082+:10EA4000AFB90020AFAF0028AFB80030AFAF00249F
63083+:10EA5000AFA0002CAFAE0034926D000831B40008B6
63084+:10EA6000168000BB020020218EE200040040F8095D
63085+:10EA700027A400108FAF003031F300025660000170
63086+:10EA800032B500FE3C048008349F008093F90008F2
63087+:10EA900033380040530000138FA400248C850004F9
63088+:10EAA0008FA7005410A700D52404001432B0000131
63089+:10EAB0001200000C8FA400242414000C1234011A3C
63090+:10EAC0002A2D000D11A001022413000E240E000AAD
63091+:10EAD000522E0001241E00088FAF002425E40001FF
63092+:10EAE000AFA400248FAA00143C0B80083565008079
63093+:10EAF000008A48218CB10030ACA9003090A4004EAF
63094+:10EB00008CA700303408FFFF0088180400E3F821C8
63095+:10EB1000ACBF00348FA600308FB900548FB8005CB2
63096+:10EB200030C200081040000B033898218CAC002044
63097+:10EB3000119300D330C600FF92EE000C8FA7003473
63098+:10EB400002402021000E6B0035B400800E000C9BAB
63099+:10EB50003285F0803C028008345000808E0F0030F7
63100+:10EB600001F1302318C00097264800803C070800B8
63101+:10EB70008CE731E42404FF80010418243118007F5D
63102+:10EB80003C1F80003C19800430F10001AFE300908D
63103+:10EB900012200006031928213C030801906396F116
63104+:10EBA00030690008152000C6306A00F73C10800864
63105+:10EBB00036040080908C004F318B000115600042BC
63106+:10EBC000000000003C0608008CC6319830CE0010D2
63107+:10EBD00051C0004230F9000190AF006B55E0003F9A
63108+:10EBE00030F9000124180001A0B8006B3C1180002E
63109+:10EBF0009622007A24470064A48700123C0D800806
63110+:10EC000035A5008090B40008329000401600000442
63111+:10EC10003C03800832AE000115C0008B00000000EC
63112+:10EC2000346400808C86002010D3000A3463010015
63113+:10EC30008C67000002C7782319E000978FBF00544B
63114+:10EC4000AC93002024130001AC760000AFB3005059
63115+:10EC5000AC7F000417C0004E000000008FA90050D8
63116+:10EC60001520000B000000003C030801906396F1A2
63117+:10EC7000306A00011140002E8FAB0058306400FE56
63118+:10EC80003C010801A02496F10A000D75000018212E
63119+:10EC90000E000CAC024020210A000F1300000000FF
63120+:10ECA0000A000E200000A0210040F80924040017EB
63121+:10ECB0000A000DCA240300010040F80924040016CC
63122+:10ECC0000A000DCA240300019094004F240DFFFE9A
63123+:10ECD000028D2824A085004F30F900011320000682
63124+:10ECE0003C0480083C030801906396F1307F0010DB
63125+:10ECF00017E00051306800EF34900080240A0001D2
63126+:10ED0000024020210E00164EA60A00129203002592
63127+:10ED100024090001AFA90050346200010240202103
63128+:10ED20000E001658A20200250A000EF93C0D8008BC
63129+:10ED30001160FE83000018218FA5003030AC000464
63130+:10ED40001180FE2C8FBF00840A000DCB240300012C
63131+:10ED500027A500380E000CB6AFA000385440FF4382
63132+:10ED60008EE200048FB40038329001005200FF3F61
63133+:10ED70008EE200048FA3003C8E6E0058006E682364
63134+:10ED800005A3FF39AE6300580A000E948EE200041A
63135+:10ED90000E00164E024020213C038008346800809B
63136+:10EDA000024020210E001658A11E000903C0302188
63137+:10EDB000240400370E0016F2000028210A000F116B
63138+:10EDC0008FA900508FAB00185960FF8D3C0D800853
63139+:10EDD0000E00164E02402021920C00252405000151
63140+:10EDE000AFA5005035820004024020210E001658C5
63141+:10EDF000A20200250A000EF93C0D800812240059D9
63142+:10EE00002A2300151060004D240900162408000C68
63143+:10EE10005628FF2732B000013C0A8008914C001BA5
63144+:10EE20002406FFBD241E000E01865824A14B001BA2
63145+:10EE30000A000EA532B000013C010801A02896F19D
63146+:10EE40000A000EF93C0D80088CB500308EFE0008DB
63147+:10EE50002404001826B6000103C0F809ACB600303F
63148+:10EE60003C030801906396F13077000116E0FF81C2
63149+:10EE7000306A00018FB200300A000D753243000481
63150+:10EE80003C1080009605011A50A0FF2B34C60010DC
63151+:10EE90000A000EC892EE000C8C6200001456FF6D42
63152+:10EEA000000000008C7800048FB9005403388823D8
63153+:10EEB0000621FF638FBF00540A000F0E0000000000
63154+:10EEC0003C010801A02A96F10A000F3030F9000138
63155+:10EED0001633FF028FAF00240A000EB0241E00106C
63156+:10EEE0000E00164E024020213C0B80083568008041
63157+:10EEF00091090025240A0001AFAA0050353300040F
63158+:10EF0000024020210E001658A11300253C050801DF
63159+:10EF100090A596F130A200FD3C010801A02296F1D7
63160+:10EF20000A000E6D004018212411000E53D1FEEA94
63161+:10EF3000241E00100A000EAF241E00165629FEDC07
63162+:10EF400032B000013C0A8008914C001B2406FFBD32
63163+:10EF5000241E001001865824A14B001B0A000EA598
63164+:10EF600032B000010A000EA4241E00123C038000EF
63165+:10EF70008C6201B80440FFFE24040800AC6401B8B0
63166+:10EF800003E000080000000030A5FFFF30C6FFFFCF
63167+:10EF90003C0780008CE201B80440FFFE34EA0180A7
63168+:10EFA000AD440000ACE400203C0480089483004899
63169+:10EFB0003068FFFF11000016AF88000824AB001274
63170+:10EFC000010B482B512000133C04800034EF01005A
63171+:10EFD00095EE00208F890000240D001A31CCFFFF30
63172+:10EFE00031274000A14D000B10E000362583FFFEC5
63173+:10EFF0000103C02B170000348F9900048F88000490
63174+:10F00000A5430014350700010A001003AF87000470
63175+:10F010003C04800024030003348201808F890000B7
63176+:10F020008F870004A043000B3C088000350C018052
63177+:10F03000A585000EA585001A8F85000C30EB800099
63178+:10F04000A5890010AD850028A58600081160000F75
63179+:10F050008F85001435190100972A00163158FFFCDE
63180+:10F06000270F000401E870218DCD400031A6FFFF7D
63181+:10F0700014C000072403BFFF3C02FFFF34487FFF9A
63182+:10F0800000E83824AF8700048F8500142403BFFFF5
63183+:10F090003C04800000E3582434830180A46B0026E4
63184+:10F0A000AC69002C10A0000300054C02A465001000
63185+:10F0B000A46900263C071000AC8701B803E00008F3
63186+:10F0C000000000008F990004240AFFFE032A382460
63187+:10F0D0000A001003AF87000427BDFFE88FA20028B5
63188+:10F0E00030A5FFFF30C6FFFFAFBF0010AF87000C99
63189+:10F0F000AF820014AF8000040E000FDBAF80000071
63190+:10F100008FBF001027BD001803E00008AF80001477
63191+:10F110003C06800034C4007034C701008C8A0000B3
63192+:10F1200090E500128F84000027BDFFF030A300FFA0
63193+:10F13000000318823082400010400037246500032D
63194+:10F140000005C8800326C0218F0E4000246F0004F4
63195+:10F15000000F6880AFAE000001A660218D8B4000DB
63196+:10F16000AFAB000494E900163128FFFC01063821FA
63197+:10F170008CE64000AFA600088FA9000800003021EF
63198+:10F18000000028213C07080024E701000A0010675E
63199+:10F19000240800089059000024A500012CAC000CA4
63200+:10F1A0000079C0210018788001E770218DCD000022
63201+:10F1B0001180000600CD302603A5102114A8FFF50C
63202+:10F1C00000051A005520FFF4905900003C0480000F
63203+:10F1D000348700703C0508008CA531048CE30000E6
63204+:10F1E0002CA2002010400009006A38230005488046
63205+:10F1F0003C0B0800256B3108012B402124AA00019B
63206+:10F20000AD0700003C010800AC2A310400C0102109
63207+:10F2100003E0000827BD0010308220001040000BE2
63208+:10F2200000055880016648218D24400024680004B0
63209+:10F2300000083880AFA4000000E618218C6540006B
63210+:10F24000AFA000080A001057AFA500040000000D91
63211+:10F250000A0010588FA9000827BDFFE03C07800076
63212+:10F2600034E60100AFBF001CAFB20018AFB100140C
63213+:10F27000AFB0001094C5000E8F87000030A4FFFFD0
63214+:10F280002483000430E2400010400010AF830028C7
63215+:10F290003C09002000E940241100000D30EC800002
63216+:10F2A0008F8A0004240BBFFF00EB38243543100085
63217+:10F2B000AF87000030F220001640000B3C1900041C
63218+:10F2C000241FFFBF0A0010B7007F102430EC80001D
63219+:10F2D000158000423C0E002030F220001240FFF862
63220+:10F2E0008F8300043C19000400F9C0241300FFF5CB
63221+:10F2F000241FFFBF34620040AF82000430E20100EF
63222+:10F300001040001130F010008F83002C10600006B8
63223+:10F310003C0F80003C05002000E52024148000C044
63224+:10F320003C0800043C0F800035EE010095CD001E26
63225+:10F3300095CC001C31AAFFFF000C5C00014B482556
63226+:10F34000AF89000C30F010001200000824110001F9
63227+:10F3500030F100201620008B3C18100000F890249B
63228+:10F36000164000823C040C002411000130E801002A
63229+:10F370001500000B3C0900018F85000430A94000F6
63230+:10F38000152000073C0900013C0C1F0100EC58242B
63231+:10F390003C0A1000116A01183C1080003C09000171
63232+:10F3A00000E9302410C000173C0B10003C18080086
63233+:10F3B0008F1800243307000214E0014024030001E9
63234+:10F3C0008FBF001C8FB200188FB100148FB00010D7
63235+:10F3D0000060102103E0000827BD002000EE682433
63236+:10F3E00011A0FFBE30F220008F8F00043C11FFFF00
63237+:10F3F00036307FFF00F0382435E380000A0010A685
63238+:10F40000AF87000000EB102450400065AF8000245F
63239+:10F410008F8C002C3C0D0F0000ED18241580008807
63240+:10F42000AF83001030E8010011000086938F0010B8
63241+:10F430003C0A0200106A00833C1280003650010032
63242+:10F44000920500139789002A3626000230AF00FF8C
63243+:10F4500025EE0004000E19C03C0480008C9801B811
63244+:10F460000700FFFE34880180AD0300003C198008CE
63245+:10F47000AC830020973100483225FFFF10A0015CCB
63246+:10F48000AF8500082523001200A3F82B53E0015993
63247+:10F490008F850004348D010095AC00202402001AF1
63248+:10F4A00030E44000318BFFFFA102000B108001927D
63249+:10F4B0002563FFFE00A3502B154001908F8F0004A1
63250+:10F4C000A50300148F88000435050001AF850004F2
63251+:10F4D0003C08800035190180A729000EA729001AD1
63252+:10F4E0008F89000C30B18000A7270010AF290028B9
63253+:10F4F000A72600081220000E3C04800035020100FF
63254+:10F50000944C0016318BFFFC256400040088182100
63255+:10F510008C7F400033E6FFFF14C000053C048000F0
63256+:10F520003C0AFFFF354D7FFF00AD2824AF85000466
63257+:10F53000240EBFFF00AE402434850180A4A800261D
63258+:10F54000ACA7002C3C071000AC8701B800001821C4
63259+:10F550008FBF001C8FB200188FB100148FB0001045
63260+:10F560000060102103E0000827BD00203C020BFFD3
63261+:10F5700000E41824345FFFFF03E3C82B5320FF7B14
63262+:10F58000241100013C0608008CC6002C24C5000193
63263+:10F590003C010800AC25002C0A0010D42411000501
63264+:10F5A0008F85002410A0002FAF80001090A30000D2
63265+:10F5B000146000792419000310A0002A30E601002D
63266+:10F5C00010C000CC8F860010241F000210DF00C97D
63267+:10F5D0008F8B000C3C0708008CE7003824E4FFFF09
63268+:10F5E00014E0000201641824000018213C0D0800FA
63269+:10F5F00025AD0038006D1021904C00048F85002847
63270+:10F6000025830004000321C030A5FFFF3626000239
63271+:10F610000E000FDB000000000A00114D0000182151
63272+:10F6200000E8302414C0FF403C0F80000E00103D65
63273+:10F63000000000008F8700000A0010CAAF82000C93
63274+:10F64000938F00103C18080127189640000F90C0B7
63275+:10F6500002588021AF9000248F85002414A0FFD38E
63276+:10F66000AF8F00103C0480008C86400030C5010044
63277+:10F6700010A000BC322300043C0C08008D8C002438
63278+:10F6800024120004106000C23190000D3C04800080
63279+:10F690008C8D40003402FFFF11A201003231FFFBCC
63280+:10F6A0008C884000310A01005540000124110010EF
63281+:10F6B00030EE080011C000BE2419FFFB8F9800280F
63282+:10F6C0002F0F03EF51E000010219802430E90100FF
63283+:10F6D00011200014320800018F87002C14E000FB79
63284+:10F6E0008F8C000C3C05800034AB0100917F00132F
63285+:10F6F00033E300FF246A00042403FFFE0203802496
63286+:10F70000000A21C012000002023230253226FFFF1B
63287+:10F710000E000FDB9785002A1200FF290000182138
63288+:10F72000320800011100000D32180004240E0001FF
63289+:10F73000120E0002023230253226FFFF9785002A82
63290+:10F740000E000FDB00002021240FFFFE020F80249B
63291+:10F750001200FF1B00001821321800045300FF188C
63292+:10F760002403000102323025241200045612000145
63293+:10F770003226FFFF9785002A0E000FDB24040100CC
63294+:10F780002419FFFB021988241220FF0D0000182104
63295+:10F790000A0010E9240300011079009C00003021C8
63296+:10F7A00090AD00012402000211A200BE30EA004028
63297+:10F7B00090B90001241800011338007F30E900409F
63298+:10F7C0008CA600049785002A00C020210E000FDBC4
63299+:10F7D0003626000200004021010018218FBF001CC6
63300+:10F7E0008FB200188FB100148FB00010006010218C
63301+:10F7F00003E0000827BD0020360F010095EE000C45
63302+:10F8000031CD020015A0FEE63C0900013C1880083D
63303+:10F81000971200489789002A362600023248FFFFD7
63304+:10F82000AF8800083C0380008C7101B80620FFFE01
63305+:10F83000346A0180AD4000001100008E3C0F800052
63306+:10F84000253F0012011FC82B1320008B240E00033C
63307+:10F85000346C0100958B00202402001A30E4400033
63308+:10F860003163FFFFA142000B108000A72463FFFE5D
63309+:10F870000103682B15A000A52408FFFE34A5000194
63310+:10F88000A5430014AF8500043C0480002412BFFF90
63311+:10F8900000B2802434850180A4A9000EA4A9001A16
63312+:10F8A000A4A60008A4B00026A4A700103C071000DE
63313+:10F8B000AC8701B80A00114D000018213C038000FC
63314+:10F8C00034640100949F000E3C1908008F3900D861
63315+:10F8D0002404008033E5FFFF273100013C010800CC
63316+:10F8E000AC3100D80E000FDB240600030A00114DD6
63317+:10F8F00000001821240A000210CA00598F85002830
63318+:10F900003C0308008C6300D0240E0001106E005EE2
63319+:10F910002CCF000C24D2FFFC2E5000041600002136
63320+:10F9200000002021241800021078001B2CD9000CA4
63321+:10F9300024DFFFF82FE900041520FF330000202109
63322+:10F9400030EB020051600004000621C054C00022C8
63323+:10F9500030A5FFFF000621C030A5FFFF0A00117D82
63324+:10F96000362600023C0908008D29002431300001B0
63325+:10F970005200FEF7000018219785002A3626000263
63326+:10F980000E000FDB000020210A00114D000018219D
63327+:10F990000A00119C241200021320FFE624DFFFF866
63328+:10F9A0000000202130A5FFFF0A00117D362600024D
63329+:10F9B0000A0011AC021980245120FF828CA6000499
63330+:10F9C0003C05080190A5964110A0FF7E2408000187
63331+:10F9D0000A0011F0010018210E000FDB3226000191
63332+:10F9E0008F8600108F8500280A00124F000621C064
63333+:10F9F0008F8500043C18800024120003371001801A
63334+:10FA0000A212000B0A00112E3C08800090A30001F6
63335+:10FA1000241100011071FF70240800012409000264
63336+:10FA20005069000430E60040240800010A0011F08B
63337+:10FA30000100182150C0FFFD240800013C0C80008B
63338+:10FA4000358B01009563001094A40002307FFFFF06
63339+:10FA5000509FFF62010018210A001284240800014F
63340+:10FA60002CA803EF1100FE56240300010A001239EE
63341+:10FA700000000000240E000335EA0180A14E000BB7
63342+:10FA80000A00121C3C04800011E0FFA2000621C005
63343+:10FA900030A5FFFF0A00117D362600020A0011A5DD
63344+:10FAA000241100201140FFC63C1280003650010096
63345+:10FAB000960F001094AE000231E80FFF15C8FFC08A
63346+:10FAC000000000000A0011E690B900013C060800A1
63347+:10FAD0008CC6003824C4FFFF14C00002018418241F
63348+:10FAE000000018213C0D080025AD0038006D1021E4
63349+:10FAF0000A0011B6904300048F8F0004240EFFFE0D
63350+:10FB00000A00112C01EE28242408FFFE0A00121A14
63351+:10FB100000A8282427BDFFC8AFB00010AFBF003435
63352+:10FB20003C10600CAFBE0030AFB7002CAFB6002861
63353+:10FB3000AFB50024AFB40020AFB3001CAFB20018C3
63354+:10FB4000AFB100148E0E5000240FFF7F3C068000E2
63355+:10FB500001CF682435AC380C240B0003AE0C5000E8
63356+:10FB6000ACCB00083C010800AC2000200E001819A6
63357+:10FB7000000000003C0A0010354980513C06601628
63358+:10FB8000AE09537C8CC700003C0860148D0500A0B2
63359+:10FB90003C03FFFF00E320243C02535300051FC237
63360+:10FBA0001482000634C57C000003A08002869821E0
63361+:10FBB0008E7200043C116000025128218CBF007C31
63362+:10FBC0008CA200783C1E600037C420203C05080150
63363+:10FBD00024A59288AF820018AF9F001C0E0016DD8E
63364+:10FBE0002406000A3C190001273996403C01080010
63365+:10FBF000AC3931DC0E0020DDAF8000148FD708084F
63366+:10FC00002418FFF03C15570902F8B02412D502F56C
63367+:10FC100024040001AF80002C3C1480003697018042
63368+:10FC20003C1E080127DE9644369301008E900000AA
63369+:10FC30003205000310A0FFFD3207000110E000882C
63370+:10FC4000320600028E7100283C048000AE91002034
63371+:10FC50008E6500048E66000000A0382100C040219F
63372+:10FC60008C8301B80460FFFE3C0B0010240A0800DE
63373+:10FC700000AB4824AC8A01B8552000E0240BBFFF3C
63374+:10FC80009675000E3C1208008E52002030AC4000E9
63375+:10FC900032AFFFFF264E000125ED00043C010800B5
63376+:10FCA000AC2E0020118000E8AF8D00283C18002009
63377+:10FCB00000B8B02412C000E530B980002408BFFFAE
63378+:10FCC00000A8382434C81000AF87000030E62000B8
63379+:10FCD00010C000E92409FFBF3C03000400E328240E
63380+:10FCE00010A00002010910243502004030EA010092
63381+:10FCF00011400010AF8200048F8B002C11600007B0
63382+:10FD00003C0D002000ED6024118000043C0F000435
63383+:10FD100000EF702411C00239000000009668001E38
63384+:10FD20009678001C3115FFFF0018B40002B690252C
63385+:10FD3000AF92000C30F910001320001324150001BD
63386+:10FD400030FF002017E0000A3C04100000E41024FB
63387+:10FD50001040000D3C0A0C003C090BFF00EA18247F
63388+:10FD60003525FFFF00A3302B10C0000830ED010047
63389+:10FD70003C0C08008D8C002C24150005258B0001FF
63390+:10FD80003C010800AC2B002C30ED010015A0000B4D
63391+:10FD90003C0500018F85000430AE400055C00007CF
63392+:10FDA0003C0500013C161F0100F690243C0F10009A
63393+:10FDB000124F01CE000000003C05000100E5302498
63394+:10FDC00010C000AF3C0C10003C1F08008FFF002447
63395+:10FDD00033E90002152000712403000100601021A6
63396+:10FDE000104000083C0680003C08800035180100E7
63397+:10FDF0008F0F00243C056020ACAF00140000000011
63398+:10FE00003C0680003C194000ACD9013800000000DD
63399+:10FE10005220001332060002262B0140262C0080BF
63400+:10FE2000240EFF80016E2024018E6824000D1940ED
63401+:10FE3000318A007F0004A9403172007F3C16200007
63402+:10FE400036C20002006A482502B2382500E2882541
63403+:10FE50000122F825ACDF0830ACD1083032060002B0
63404+:10FE600010C0FF723C188000370501408CA80000CC
63405+:10FE700024100040AF08002090AF000831E300706C
63406+:10FE8000107000D428790041532000082405006038
63407+:10FE9000241100201071000E3C0A40003C09800033
63408+:10FEA000AD2A01780A001304000000001465FFFB6E
63409+:10FEB0003C0A40000E001FFA000000003C0A40000F
63410+:10FEC0003C098000AD2A01780A00130400000000FC
63411+:10FED00090A90009241F00048CA70000312800FF0E
63412+:10FEE000111F01B22503FFFA2C7200061240001404
63413+:10FEF0003C0680008CA9000494A4000A310500FF90
63414+:10FF000000095E022D6A00083086FFFF15400002DE
63415+:10FF10002567000424070003240C000910AC01FA33
63416+:10FF200028AD000A11A001DE2410000A240E0008EA
63417+:10FF300010AE0028000731C000C038213C06800008
63418+:10FF40008CD501B806A0FFFE34D20180AE47000078
63419+:10FF500034CB0140916E0008240300023C0A4000AB
63420+:10FF600031C400FF00046A0001A86025A64C000807
63421+:10FF7000A243000B9562000A3C0810003C09800077
63422+:10FF8000A64200108D670004AE470024ACC801B83B
63423+:10FF9000AD2A01780A001304000000003C0A80002A
63424+:10FFA000354401009483000E3C0208008C4200D8C6
63425+:10FFB000240400803065FFFF245500013C01080047
63426+:10FFC000AC3500D80E000FDB240600030A001370C6
63427+:10FFD000000018210009320230D900FF2418000166
63428+:10FFE0001738FFD5000731C08F910020262200016D
63429+:10FFF000AF8200200A0013C800C0382100CB2024A3
63430+:020000021000EC
63431+:10000000AF85000010800008AF860004240D87FF34
63432+:1000100000CD6024158000083C0E006000AE302446
63433+:1000200010C00005000000000E000D42000000009E
63434+:100030000A001371000000000E0016050000000009
63435+:100040000A0013710000000030B980005320FF1F28
63436+:10005000AF8500003C02002000A2F82453E0FF1B03
63437+:10006000AF8500003C07FFFF34E47FFF00A4382485
63438+:100070000A00132B34C880000A001334010910242D
63439+:1000800000EC58245160005AAF8000248F8D002C62
63440+:100090003C0E0F0000EE182415A00075AF83001071
63441+:1000A00030EF010011E00073939800103C12020041
63442+:1000B000107200703C06800034D9010093280013B0
63443+:1000C0009789002A36A60002311800FF271600047F
63444+:1000D000001619C03C0480008C8501B804A0FFFE06
63445+:1000E00034880180AD0300003C158008AC830020FB
63446+:1000F00096BF004833E5FFFF10A001BCAF850008A4
63447+:100100002523001200A3102B504001B98F85000455
63448+:10011000348D010095AC0020240B001A30E440001F
63449+:10012000318AFFFFA10B000B108001BA2543FFFEAF
63450+:1001300000A3702B15C001B88F9600048F8F0004A8
63451+:10014000A503001435E50001AF8500043C088000DC
63452+:1001500035150180A6A9000EA6A9001A8F89000CEA
63453+:1001600030BF8000A6A70010AEA90028A6A60008F0
63454+:1001700013E0000F3C0F8000350C0100958B00163A
63455+:10018000316AFFFC25440004008818218C6240007D
63456+:100190003046FFFF14C000072416BFFF3C0EFFFFD0
63457+:1001A00035CD7FFF00AD2824AF8500043C0F8000D3
63458+:1001B0002416BFFF00B6902435E50180A4B20026C6
63459+:1001C000ACA7002C3C071000ADE701B80A00137083
63460+:1001D000000018210E00165D000000003C0A4000DF
63461+:1001E0003C098000AD2A01780A00130400000000D9
63462+:1001F0008F85002410A00027AF80001090A300007E
63463+:10020000106000742409000310690101000030210E
63464+:1002100090AE0001240D000211CD014230EF0040EC
63465+:1002200090A90001241F0001113F000930E20040A5
63466+:100230008CA600049785002A00C020210E000FDB49
63467+:1002400036A60002000040210A00137001001821A8
63468+:100250005040FFF88CA600043C07080190E7964147
63469+:1002600010E0FFF4240800010A00137001001821B7
63470+:10027000939800103C1F080127FF96400018C8C043
63471+:10028000033F4021AF8800248F85002414A0FFDBAA
63472+:10029000AF9800103C0480008C86400030C50100FF
63473+:1002A00010A0008732AB00043C0C08008D8C0024A9
63474+:1002B00024160004156000033192000D241600027C
63475+:1002C0003C0480008C8E4000340DFFFF11CD0113E3
63476+:1002D00032B5FFFB8C984000330F010055E0000160
63477+:1002E0002415001030E80800110000382409FFFB35
63478+:1002F0008F9F00282FF903EF53200001024990241B
63479+:1003000030E2010010400014325F00018F87002CA2
63480+:1003100014E0010E8F8C000C3C0480003486010038
63481+:1003200090C5001330AA00FF25430004000321C03C
63482+:100330002419FFFE025990241240000202B6302513
63483+:1003400032A6FFFF0E000FDB9785002A1240FEA3A6
63484+:1003500000001821325F000113E0000D3247000455
63485+:10036000240900011249000202B6302532A6FFFF1F
63486+:100370009785002A0E000FDB000020212402FFFEDB
63487+:10038000024290241240FE950000182132470004DA
63488+:1003900050E0FE922403000102B63025241600042A
63489+:1003A0005656000132A6FFFF9785002A0E000FDB8C
63490+:1003B000240401002403FFFB0243A82412A0FE87AB
63491+:1003C000000018210A001370240300010A0014B968
63492+:1003D0000249902410A0FFAF30E5010010A00017E3
63493+:1003E0008F8600102403000210C300148F84000CB9
63494+:1003F0003C0608008CC6003824CAFFFF14C0000267
63495+:10040000008A1024000010213C0E080025CE003880
63496+:10041000004E682191AC00048F850028258B0004D4
63497+:10042000000B21C030A5FFFF36A600020E000FDB37
63498+:10043000000000000A00137000001821240F0002C1
63499+:1004400010CF0088241600013C0308008C6300D004
63500+:100450001076008D8F85002824D9FFFC2F280004FA
63501+:100460001500006300002021241F0002107F005DA2
63502+:100470002CC9000C24C3FFF82C6200041440FFE9CF
63503+:100480000000202130EA020051400004000621C093
63504+:1004900054C0000530A5FFFF000621C030A5FFFFB6
63505+:1004A0000A00150436A600020E000FDB32A600017A
63506+:1004B0008F8600108F8500280A001520000621C0B5
63507+:1004C0003C0A08008D4A0024315200015240FE438C
63508+:1004D000000018219785002A36A600020E000FDBC7
63509+:1004E000000020210A001370000018219668000CFB
63510+:1004F000311802005700FE313C0500013C1F800806
63511+:1005000097F900489789002A36A600023328FFFF92
63512+:10051000AF8800083C0380008C7501B806A0FFFE80
63513+:100520003C04800034820180AC400000110000B621
63514+:1005300024180003252A0012010A182B106000B2AB
63515+:1005400000000000966F00203C0E8000240D001A71
63516+:1005500031ECFFFF35CA018030EB4000A14D000BAC
63517+:10056000116000B02583FFFE0103902B164000AE02
63518+:100570002416FFFE34A50001A5430014AF85000436
63519+:100580002419BFFF00B94024A6E9000EA6E9001A0D
63520+:10059000A6E60008A6E80026A6E700103C07100023
63521+:1005A000AE8701B80A001370000018213C048000D7
63522+:1005B0008C8201B80440FFFE349601802415001C93
63523+:1005C000AEC70000A2D5000B3C071000AC8701B8F5
63524+:1005D0003C0A40003C098000AD2A01780A0013045F
63525+:1005E000000000005120FFA424C3FFF800002021D8
63526+:1005F00030A5FFFF0A00150436A600020E00103DCC
63527+:10060000000000008F8700000A001346AF82000C34
63528+:1006100090A30001241500011075FF0B24080001B0
63529+:10062000240600021066000430E2004024080001A5
63530+:100630000A001370010018215040FFFD240800013A
63531+:100640003C0C8000358B0100956A001094A40002D8
63532+:100650003143FFFF5083FDE1010018210A00158599
63533+:10066000240800018F8500282CB203EF1240FDDB27
63534+:10067000240300013C0308008C6300D02416000111
63535+:100680001476FF7624D9FFFC2CD8000C1300FF72DF
63536+:10069000000621C030A5FFFF0A00150436A600029F
63537+:1006A00010B00037240F000B14AFFE23000731C039
63538+:1006B000312600FF00065600000A4E0305220047BF
63539+:1006C00030C6007F0006F8C03C16080126D69640CA
63540+:1006D00003F68021A2000001A20000003C0F600090
63541+:1006E0008DF918202405000100C588040011302769
63542+:1006F0000326C024000731C000C03821ADF81820FF
63543+:100700000A0013C8A60000028F850020000731C030
63544+:1007100024A2FFFF0A0013F6AF8200200A0014B2E1
63545+:100720002415002011E0FECC3C1980003728010080
63546+:100730009518001094B6000233120FFF16D2FEC6B1
63547+:10074000000000000A00148290A900013C0B080080
63548+:100750008D6B0038256DFFFF15600002018D1024A0
63549+:10076000000010213C080800250800380048C0217E
63550+:10077000930F000425EE00040A0014C5000E21C0EA
63551+:1007800000065202241F00FF115FFDEB000731C07D
63552+:10079000000A20C03C0E080125CE9640008EA821FC
63553+:1007A000009E602100095C02240D00013C076000EE
63554+:1007B000A2AD0000AD860000A2AB00018CF21820B3
63555+:1007C00024030001014310040242B025ACF61820B6
63556+:1007D00000C038210A0013C8A6A900020A0015AA01
63557+:1007E000AF8000200A0012FFAF84002C8F85000428
63558+:1007F0003C1980002408000337380180A308000B4F
63559+:100800000A00144D3C088000A2F8000B0A00155A9B
63560+:100810002419BFFF8F9600042412FFFE0A00144B18
63561+:1008200002D228242416FFFE0A00155800B62824F8
63562+:100830003C038000346401008C85000030A2003E3F
63563+:100840001440000800000000AC6000488C870000E5
63564+:1008500030E607C010C0000500000000AC60004C8E
63565+:10086000AC60005003E0000824020001AC600054BA
63566+:10087000AC6000408C880000310438001080FFF923
63567+:10088000000000002402000103E00008AC60004406
63568+:100890003C0380008C6201B80440FFFE3467018095
63569+:1008A000ACE4000024080001ACE00004A4E500086A
63570+:1008B00024050002A0E8000A34640140A0E5000B12
63571+:1008C0009483000A14C00008A4E30010ACE00024E4
63572+:1008D0003C07800034E901803C041000AD20002872
63573+:1008E00003E00008ACE401B88C8600043C0410006E
63574+:1008F000ACE600243C07800034E90180AD200028EC
63575+:1009000003E00008ACE401B83C0680008CC201B8EA
63576+:100910000440FFFE34C7018024090002ACE400005B
63577+:10092000ACE40004A4E50008A0E9000A34C50140D5
63578+:10093000A0E9000B94A8000A3C041000A4E80010F1
63579+:10094000ACE000248CA30004ACE3002803E0000822
63580+:10095000ACC401B83C039000346200010082202541
63581+:100960003C038000AC6400208C65002004A0FFFEE6
63582+:100970000000000003E00008000000003C028000CE
63583+:10098000344300010083202503E00008AC4400202C
63584+:1009900027BDFFE03C098000AFBF0018AFB10014D5
63585+:1009A000AFB00010352801408D10000091040009FF
63586+:1009B0009107000891050008308400FF30E600FF31
63587+:1009C00000061A002C820081008330251040002A86
63588+:1009D00030A50080000460803C0D080125AD92B078
63589+:1009E000018D58218D6A00000140000800000000C0
63590+:1009F0003C038000346201409445000A14A0001EAC
63591+:100A00008F91FCC09227000530E6000414C0001A44
63592+:100A1000000000000E00164E02002021922A000560
63593+:100A200002002021354900040E001658A2290005B5
63594+:100A30009228000531040004148000020000000028
63595+:100A40000000000D922D0000240B002031AC00FFAF
63596+:100A5000158B00093C0580008CAE01B805C0FFFE77
63597+:100A600034B10180AE3000003C0F100024100005AE
63598+:100A7000A230000BACAF01B80000000D8FBF001812
63599+:100A80008FB100148FB0001003E0000827BD0020D4
63600+:100A90000200202100C028218FBF00188FB1001450
63601+:100AA0008FB00010240600010A00161D27BD00208B
63602+:100AB0000000000D0200202100C028218FBF001877
63603+:100AC0008FB100148FB00010000030210A00161DF5
63604+:100AD00027BD002014A0FFE8000000000200202134
63605+:100AE0008FBF00188FB100148FB0001000C02821F4
63606+:100AF0000A00163B27BD00203C0780008CEE01B8A1
63607+:100B000005C0FFFE34F00180241F0002A21F000B6D
63608+:100B100034F80140A60600089719000A3C0F10009F
63609+:100B2000A61900108F110004A6110012ACEF01B835
63610+:100B30000A0016998FBF001827BDFFE8AFBF00104D
63611+:100B40000E000FD4000000003C0280008FBF001098
63612+:100B500000002021AC4001800A00108F27BD001842
63613+:100B60003084FFFF30A5FFFF108000070000182130
63614+:100B7000308200011040000200042042006518216C
63615+:100B80001480FFFB0005284003E0000800601021EE
63616+:100B900010C00007000000008CA2000024C6FFFF68
63617+:100BA00024A50004AC82000014C0FFFB24840004D0
63618+:100BB00003E000080000000010A0000824A3FFFFCD
63619+:100BC000AC86000000000000000000002402FFFFCF
63620+:100BD0002463FFFF1462FFFA2484000403E000088A
63621+:100BE000000000003C03800027BDFFF83462018054
63622+:100BF000AFA20000308C00FF30AD00FF30CE00FF10
63623+:100C00003C0B80008D6401B80480FFFE00000000F2
63624+:100C10008FA900008D6801288FAA00008FA700000F
63625+:100C20008FA400002405000124020002A085000A10
63626+:100C30008FA30000359940003C051000A062000B16
63627+:100C40008FB800008FAC00008FA600008FAF0000AF
63628+:100C500027BD0008AD280000AD400004AD80002491
63629+:100C6000ACC00028A4F90008A70D0010A5EE0012E2
63630+:100C700003E00008AD6501B83C06800827BDFFE829
63631+:100C800034C50080AFBF001090A7000924020012F5
63632+:100C900030E300FF1062000B008030218CA8005070
63633+:100CA00000882023048000088FBF00108CAA003425
63634+:100CB000240400390000282100CA4823052000052B
63635+:100CC000240600128FBF00102402000103E0000878
63636+:100CD00027BD00180E0016F2000000008FBF0010A4
63637+:100CE0002402000103E0000827BD001827BDFFC84B
63638+:100CF000AFB20030AFB00028AFBF0034AFB1002CAE
63639+:100D000000A0802190A5000D30A6001010C000109A
63640+:100D1000008090213C0280088C4400048E0300086F
63641+:100D20001064000C30A7000530A6000510C0009329
63642+:100D3000240400018FBF00348FB200308FB1002C2B
63643+:100D40008FB000280080102103E0000827BD003884
63644+:100D500030A7000510E0000F30AB001210C00006F5
63645+:100D6000240400013C0980088E0800088D25000439
63646+:100D70005105009C240400388FBF00348FB200302E
63647+:100D80008FB1002C8FB000280080102103E00008F4
63648+:100D900027BD0038240A0012156AFFE6240400016A
63649+:100DA0000200202127A500100E000CB6AFA00010F5
63650+:100DB0001440007C3C19800837240080909800087B
63651+:100DC000331100081220000A8FA7001030FF010025
63652+:100DD00013E000A48FA300148C8600580066102333
63653+:100DE000044000043C0A8008AC8300588FA7001020
63654+:100DF0003C0A800835480080910900083124000829
63655+:100E00001480000224080003000040213C1F8008D9
63656+:100E100093F1001193F9001237E600808CCC005456
63657+:100E2000333800FF03087821322D00FF000F708057
63658+:100E300001AE282100AC582B1160006F00000000AB
63659+:100E400094CA005C8CC900543144FFFF0125102373
63660+:100E50000082182B14600068000000008CCB005446
63661+:100E60000165182330EC00041180006C000830800C
63662+:100E70008FA8001C0068102B1040006230ED0004A9
63663+:100E8000006610232C46008010C00002004088211C
63664+:100E9000241100800E00164E024020213C0D8008D7
63665+:100EA00035A6008024070001ACC7000C90C80008DC
63666+:100EB0000011484035A70100310C007FA0CC00088C
63667+:100EC0008E05000424AB0001ACCB0030A4D1005C43
63668+:100ED0008CCA003C9602000E01422021ACC40020C6
63669+:100EE0008CC3003C0069F821ACDF001C8E190004A3
63670+:100EF000ACF900008E180008ACF800048FB10010A7
63671+:100F0000322F000855E0004793A60020A0C0004EF5
63672+:100F100090D8004E2411FFDFA0F8000890CF000801
63673+:100F200001F17024A0CE00088E0500083C0B80085B
63674+:100F300035690080AD2500388D6A00148D2200309F
63675+:100F40002419005001422021AD24003491230000D7
63676+:100F5000307F00FF13F90036264F01000E001658AF
63677+:100F60000240202124040038000028210E0016F23F
63678+:100F70002406000A0A001757240400010E000D2859
63679+:100F8000000020218FBF00348FB200308FB1002CC1
63680+:100F90008FB00028004020210080102103E00008CD
63681+:100FA00027BD00388E0E00083C0F800835F0008009
63682+:100FB000AE0E005402402021AE0000300E00164E4E
63683+:100FC00000000000920D00250240202135AC0020D9
63684+:100FD0000E001658A20C00250E000CAC0240202179
63685+:100FE000240400382405008D0E0016F22406001299
63686+:100FF0000A0017572404000194C5005C0A001792E8
63687+:1010000030A3FFFF2407021811A0FF9E00E6102363
63688+:101010008FAE001C0A00179A01C610230A0017970A
63689+:101020002C620218A0E600080A0017C48E0500080A
63690+:101030002406FF8001E6C0243C118000AE38002861
63691+:101040008E0D000831E7007F3C0E800C00EE602121
63692+:10105000AD8D00E08E080008AF8C00380A0017D074
63693+:10106000AD8800E4AC800058908500082403FFF7A9
63694+:1010700000A33824A08700080A0017758FA7001066
63695+:101080003C05080024A560A83C04080024846FF4F3
63696+:101090003C020800244260B0240300063C01080121
63697+:1010A000AC2596C03C010801AC2496C43C01080163
63698+:1010B000AC2296C83C010801A02396CC03E00008AE
63699+:1010C0000000000003E00008240200013C02800050
63700+:1010D000308800FF344701803C0680008CC301B893
63701+:1010E0000460FFFE000000008CC501282418FF806A
63702+:1010F0003C0D800A24AF010001F8702431EC007F20
63703+:10110000ACCE0024018D2021ACE50000948B00EAD8
63704+:101110003509600024080002316AFFFFACEA0004D0
63705+:1011200024020001A4E90008A0E8000BACE00024C0
63706+:101130003C071000ACC701B8AF84003803E00008DA
63707+:10114000AF85006C938800488F8900608F820038DB
63708+:1011500030C600FF0109382330E900FF01221821C1
63709+:1011600030A500FF2468008810C000020124382147
63710+:101170000080382130E400031480000330AA00030B
63711+:101180001140000D312B000310A0000900001021B8
63712+:1011900090ED0000244E000131C200FF0045602B9D
63713+:1011A000A10D000024E700011580FFF925080001CA
63714+:1011B00003E00008000000001560FFF300000000DD
63715+:1011C00010A0FFFB000010218CF80000245900043F
63716+:1011D000332200FF0045782BAD18000024E70004FF
63717+:1011E00015E0FFF92508000403E0000800000000F6
63718+:1011F00093850048938800588F8700600004320070
63719+:101200003103007F00E5102B30C47F001040000F39
63720+:10121000006428258F8400383C0980008C8A00EC0B
63721+:10122000AD2A00A43C03800000A35825AC6B00A0AD
63722+:101230008C6C00A00580FFFE000000008C6D00ACEF
63723+:10124000AC8D00EC03E000088C6200A80A00188254
63724+:101250008F840038938800593C0280000080502120
63725+:10126000310300FEA383005930ABFFFF30CC00FFF9
63726+:1012700030E7FFFF344801803C0980008D2401B82D
63727+:101280000480FFFE8F8D006C24180016AD0D000049
63728+:101290008D2201248F8D0038AD0200048D5900206D
63729+:1012A000A5070008240201C4A119000AA118000B17
63730+:1012B000952F01208D4E00088D4700049783005C18
63731+:1012C0008D59002401CF302100C7282100A32023FD
63732+:1012D0002418FFFFA504000CA50B000EA5020010AA
63733+:1012E000A50C0012AD190018AD18002495AF00E848
63734+:1012F0003C0B10002407FFF731EEFFFFAD0E002876
63735+:101300008DAC0084AD0C002CAD2B01B88D460020B7
63736+:1013100000C7282403E00008AD4500208F8800386E
63737+:101320000080582130E7FFFF910900D63C02800081
63738+:1013300030A5FFFF312400FF00041A00006750258C
63739+:1013400030C600FF344701803C0980008D2C01B875
63740+:101350000580FFFE8F82006C240F0017ACE20000B6
63741+:101360008D390124ACF900048D780020A4EA00082E
63742+:10137000241901C4A0F8000AA0EF000B9523012056
63743+:101380008D6E00088D6D00049784005C01C35021B0
63744+:10139000014D602101841023A4E2000CA4E5000E9D
63745+:1013A000A4F90010A4E60012ACE000148D7800242B
63746+:1013B000240DFFFFACF800188D0F007CACEF001C73
63747+:1013C0008D0E00783C0F1000ACEE0020ACED002438
63748+:1013D000950A00BE240DFFF73146FFFFACE600285A
63749+:1013E000950C00809504008231837FFF0003CA00C2
63750+:1013F0003082FFFF0322C021ACF8002CAD2F01B8D2
63751+:10140000950E00828D6A002000AE3021014D282407
63752+:10141000A506008203E00008AD6500203C028000C4
63753+:10142000344501803C0480008C8301B80460FFFED9
63754+:101430008F8A0044240600199549001C3128FFFFBB
63755+:10144000000839C0ACA70000A0A6000B3C051000A6
63756+:1014500003E00008AC8501B88F87004C0080402174
63757+:1014600030C400FF3C0680008CC201B80440FFFE7F
63758+:101470008F89006C9383006834996000ACA90000E8
63759+:10148000A0A300058CE20010240F00022403FFF744
63760+:10149000A4A20006A4B900088D180020A0B8000A74
63761+:1014A000A0AF000B8CEE0000ACAE00108CED000481
63762+:1014B000ACAD00148CEC001CACAC00248CEB002018
63763+:1014C000ACAB00288CEA002C3C071000ACAA002C26
63764+:1014D0008D090024ACA90018ACC701B88D05002007
63765+:1014E00000A3202403E00008AD0400208F8600380C
63766+:1014F00027BDFFE0AFB10014AFBF0018AFB00010C0
63767+:1015000090C300D430A500FF3062002010400008D6
63768+:10151000008088218CCB00D02409FFDF256A0001E0
63769+:10152000ACCA00D090C800D401093824A0C700D4A8
63770+:1015300014A000403C0C80008F840038908700D4B9
63771+:101540002418FFBF2406FFEF30E3007FA08300D400
63772+:10155000979F005C8F8200608F8D003803E2C82364
63773+:10156000A799005CA5A000BC91AF00D401F870243D
63774+:10157000A1AE00D48F8C0038A18000D78F8A0038AC
63775+:10158000A5400082AD4000EC914500D400A658244F
63776+:10159000A14B00D48F9000348F8400609786005C4C
63777+:1015A0000204282110C0000FAF850034A38000582A
63778+:1015B0003C0780008E2C000894ED01208E2B000447
63779+:1015C000018D5021014B8021020620233086FFFF30
63780+:1015D00030C8000F3909000131310001162000091F
63781+:1015E000A3880058938600488FBF00188FB100145D
63782+:1015F0008FB0001027BD0020AF85006403E0000815
63783+:10160000AF86006000C870238FBF00189386004823
63784+:101610008FB100148FB0001034EF0C00010F28219F
63785+:1016200027BD0020ACEE0084AF85006403E0000815
63786+:10163000AF86006035900180020028210E00190F4E
63787+:10164000240600828F840038908600D430C5004084
63788+:1016500050A0FFBAA38000688F85004C3C06800034
63789+:101660008CCD01B805A0FFFE8F89006C2408608234
63790+:1016700024070002AE090000A6080008A207000B1C
63791+:101680008CA300083C0E1000AE0300108CA2000CCE
63792+:10169000AE0200148CBF0014AE1F00188CB90018E5
63793+:1016A000AE1900248CB80024AE1800288CAF002896
63794+:1016B000AE0F002CACCE01B80A001948A380006818
63795+:1016C0008F8A003827BDFFE0AFB10014AFB0001023
63796+:1016D0008F880060AFBF00189389003C954200BC22
63797+:1016E00030D100FF0109182B0080802130AC00FFB1
63798+:1016F0003047FFFF0000582114600003310600FF4F
63799+:1017000001203021010958239783005C0068202BB9
63800+:101710001480002700000000106800562419000102
63801+:101720001199006334E708803165FFFF0E0018C08F
63802+:10173000020020218F83006C3C07800034E601808A
63803+:101740003C0580008CAB01B80560FFFE240A001840
63804+:101750008F840038ACC30000A0CA000B948900BE7F
63805+:101760003C081000A4C90010ACC00030ACA801B8FF
63806+:101770009482008024430001A4830080949F008011
63807+:101780003C0608008CC6318833EC7FFF1186005E72
63808+:101790000000000002002021022028218FBF001835
63809+:1017A0008FB100148FB000100A00193427BD00203B
63810+:1017B000914400D42403FF8000838825A15100D4E4
63811+:1017C0009784005C3088FFFF51000023938C003C1D
63812+:1017D0008F8500382402EFFF008B782394AE00BC85
63813+:1017E0000168502B31E900FF01C26824A4AD00BCA0
63814+:1017F00051400039010058213C1F800037E60100AC
63815+:101800008CD800043C190001031940245500000144
63816+:1018100034E740008E0A00202403FFFB241100015E
63817+:1018200001432024AE0400201191002D34E78000F4
63818+:1018300002002021012030210E0018C03165FFFF79
63819+:101840009787005C8F890060A780005C0127802358
63820+:10185000AF900060938C003C8F8B00388FBF0018D6
63821+:101860008FB100148FB0001027BD002003E00008E6
63822+:10187000A16C00D73C0D800035AA01008D48000402
63823+:101880003C0900010109282454A0000134E740006C
63824+:101890008E0F00202418FFFB34E7800001F870242D
63825+:1018A00024190001AE0E00201599FF9F34E708802F
63826+:1018B000020020210E00188E3165FFFF020020215A
63827+:1018C000022028218FBF00188FB100148FB00010A4
63828+:1018D0000A00193427BD00200A0019F7000048212A
63829+:1018E00002002021012030210E00188E3165FFFFFB
63830+:1018F0009787005C8F890060A780005C01278023A8
63831+:101900000A001A0EAF900060948C0080241F8000A3
63832+:10191000019F3024A4860080908B0080908F0080EF
63833+:10192000316700FF0007C9C20019C027001871C045
63834+:1019300031ED007F01AE2825A08500800A0019DF67
63835+:1019400002002021938500682403000127BDFFE8E1
63836+:1019500000A330042CA20020AFB00010AFBF0014D1
63837+:1019600000C01821104000132410FFFE3C0708009F
63838+:101970008CE7319000E610243C088000350501809A
63839+:1019800014400005240600848F890038240A0004CE
63840+:101990002410FFFFA12A00FC0E00190F0000000018
63841+:1019A000020010218FBF00148FB0001003E0000868
63842+:1019B00027BD00183C0608008CC631940A001A574F
63843+:1019C00000C310248F87004427BDFFE0AFB200188A
63844+:1019D000AFB10014AFB00010AFBF001C30D000FF9B
63845+:1019E00090E6000D00A088210080902130C5007F86
63846+:1019F000A0E5000D8F8500388E2300188CA200D042
63847+:101A00001062002E240A000E0E001A4AA38A0068F3
63848+:101A10002409FFFF104900222404FFFF5200002088
63849+:101A2000000020218E2600003C0C001000CC582421
63850+:101A3000156000393C0E000800CE682455A0003F18
63851+:101A4000024020213C18000200D880241200001F10
63852+:101A50003C0A00048F8700448CE200148CE30010E1
63853+:101A60008CE500140043F82303E5C82B1320000580
63854+:101A7000024020218E24002C8CF1001010910031A6
63855+:101A80000240202124020012A38200680E001A4A9C
63856+:101A90002412FFFF105200022404FFFF0000202147
63857+:101AA0008FBF001C8FB200188FB100148FB00010D0
63858+:101AB0000080102103E0000827BD002090A800D47A
63859+:101AC000350400200A001A80A0A400D400CA4824CB
63860+:101AD0001520000B8F8B00448F8D00448DAC0010BF
63861+:101AE0001580000B024020218E2E002C51C0FFECEF
63862+:101AF00000002021024020210A001A9B2402001726
63863+:101B00008D66001050C0FFE6000020210240202119
63864+:101B10000A001A9B24020011024020212402001511
63865+:101B20000E001A4AA3820068240FFFFF104FFFDC4B
63866+:101B30002404FFFF0A001A8A8E2600000A001AC138
63867+:101B4000240200143C08000400C8382450E0FFD4EC
63868+:101B500000002021024020210A001A9B24020013C9
63869+:101B60008F85003827BDFFD8AFB3001CAFB2001877
63870+:101B7000AFB10014AFB00010AFBF002090A700D4E9
63871+:101B80008F90004C2412FFFF34E2004092060000C8
63872+:101B9000A0A200D48E0300100080982110720006CD
63873+:101BA00030D1003F2408000D0E001A4AA3880068B7
63874+:101BB000105200252404FFFF8F8A00388E09001878
63875+:101BC0008D4400D01124000702602021240C000E57
63876+:101BD0000E001A4AA38C0068240BFFFF104B001A5A
63877+:101BE0002404FFFF24040020122400048F8D0038F9
63878+:101BF00091AF00D435EE0020A1AE00D48F85005403
63879+:101C000010A00019000000001224004A8F9800382C
63880+:101C10008F92FCC0971000809651000A5230004805
63881+:101C20008F9300403C1F08008FFF318C03E5C82BC9
63882+:101C30001720001E02602021000028210E0019A993
63883+:101C400024060001000020218FBF00208FB3001C5C
63884+:101C50008FB200188FB100148FB0001000801021D7
63885+:101C600003E0000827BD00285224002A8E05001436
63886+:101C70008F840038948A008025490001A48900805F
63887+:101C8000948800803C0208008C42318831077FFF35
63888+:101C900010E2000E00000000026020210E00193446
63889+:101CA000240500010A001B0B000020212402002D46
63890+:101CB0000E001A4AA38200682403FFFF1443FFE1C9
63891+:101CC0002404FFFF0A001B0C8FBF002094990080A2
63892+:101CD000241F800024050001033FC024A498008035
63893+:101CE00090920080908E0080325100FF001181C2DE
63894+:101CF00000107827000F69C031CC007F018D582576
63895+:101D0000A08B00800E001934026020210A001B0BFA
63896+:101D1000000020212406FFFF54A6FFD68F84003840
63897+:101D2000026020210E001934240500010A001B0B5B
63898+:101D300000002021026020210A001B252402000A45
63899+:101D40002404FFFD0A001B0BAF9300608F8800384E
63900+:101D500027BDFFE8AFB00010AFBF0014910A00D458
63901+:101D60008F87004C00808021354900408CE60010B0
63902+:101D7000A10900D43C0208008C4231B030C53FFFBD
63903+:101D800000A2182B106000078F850050240DFF80E3
63904+:101D900090AE000D01AE6024318B00FF156000088D
63905+:101DA0000006C382020020212403000D8FBF00140F
63906+:101DB0008FB0001027BD00180A001A4AA3830068DC
63907+:101DC00033060003240F000254CFFFF70200202146
63908+:101DD00094A2001C8F85003824190023A4A200E8D7
63909+:101DE0008CE8000000081E02307F003F13F9003528
63910+:101DF0003C0A00838CE800188CA600D0110600086D
63911+:101E0000000000002405000E0E001A4AA385006899
63912+:101E10002407FFFF104700182404FFFF8F850038B8
63913+:101E200090A900D435240020A0A400D48F8C0044B5
63914+:101E3000918E000D31CD007FA18D000D8F83005458
63915+:101E40001060001C020020218F8400508C9800102C
63916+:101E50000303782B11E0000D241900180200202143
63917+:101E6000A39900680E001A4A2410FFFF10500002C8
63918+:101E70002404FFFF000020218FBF00148FB000104A
63919+:101E80000080102103E0000827BD00188C86001098
63920+:101E90008F9F00440200202100C31023AFE20010F6
63921+:101EA000240500010E0019A9240600010A001B9751
63922+:101EB000000020210E001934240500010A001B97A0
63923+:101EC00000002021010A5824156AFFD98F8C004494
63924+:101ED000A0A600FC0A001B84A386005A30A500FFC0
63925+:101EE0002406000124A9000100C9102B1040000C99
63926+:101EF00000004021240A000100A61823308B0001B5
63927+:101F000024C60001006A3804000420421160000267
63928+:101F100000C9182B010740251460FFF800A61823FC
63929+:101F200003E000080100102127BDFFD8AFB0001862
63930+:101F30008F90004CAFB1001CAFBF00202403FFFF07
63931+:101F40002411002FAFA30010920600002405000802
63932+:101F500026100001006620260E001BB0308400FF12
63933+:101F600000021E003C021EDC34466F410A001BD8F2
63934+:101F70000000102110A00009008018212445000154
63935+:101F800030A2FFFF2C4500080461FFFA0003204047
63936+:101F90000086202614A0FFF9008018210E001BB037
63937+:101FA000240500208FA300102629FFFF313100FFF8
63938+:101FB00000034202240700FF1627FFE20102182651
63939+:101FC00000035027AFAA0014AFAA00100000302170
63940+:101FD00027A8001027A7001400E6782391ED00033E
63941+:101FE00024CE000100C8602131C600FF2CCB0004C4
63942+:101FF0001560FFF9A18D00008FA200108FBF002097
63943+:102000008FB1001C8FB0001803E0000827BD002826
63944+:1020100027BDFFD0AFB3001CAFB00010AFBF00288A
63945+:10202000AFB50024AFB40020AFB20018AFB10014B8
63946+:102030003C0C80008D880128240FFF803C06800A1C
63947+:1020400025100100250B0080020F68243205007F57
63948+:10205000016F7024AD8E009000A62821AD8D002464
63949+:1020600090A600FC3169007F3C0A8004012A1821F7
63950+:10207000A386005A9067007C00809821AF830030CF
63951+:1020800030E20002AF88006CAF85003800A0182154
63952+:10209000144000022404003424040030A3840048C7
63953+:1020A0008C7200DC30D100FF24040004AF92006089
63954+:1020B00012240004A38000688E7400041680001EA1
63955+:1020C0003C0880009386005930C7000110E0000FE3
63956+:1020D0008F9300608CB000848CA800842404FF805F
63957+:1020E000020410240002F940310A007F03EA482567
63958+:1020F0003C0C2000012C902530CD00FE3C038000DC
63959+:10210000AC720830A38D00598F9300608FBF0028F8
63960+:102110008FB50024ACB300DC8FB400208FB3001C5B
63961+:102120008FB200188FB100148FB00010240200018C
63962+:1021300003E0000827BD00308E7F000895020120D3
63963+:102140008E67001003E2C8213326FFFF30D8000F4E
63964+:1021500033150001AF87003416A00058A39800582B
63965+:1021600035090C000309382100D81823AD03008479
63966+:10217000AF8700648E6A00043148FFFF1100007EC3
63967+:10218000A78A005C90AC00D42407FF8000EC3024C8
63968+:1021900030CB00FF1560004B9786005C938E005A91
63969+:1021A000240D000230D5FFFF11CD02A20000A021B6
63970+:1021B0008F85006002A5802B160000BC9388004824
63971+:1021C0003C11800096240120310400FF1485008812
63972+:1021D0008F8400648F9800343312000356400085CA
63973+:1021E00030A500FF8F900064310C00FF24060034FE
63974+:1021F00011860095AF90004C9204000414800118E1
63975+:102200008F8E0038A380003C8E0D00048DC800D84E
63976+:102210003C0600FF34CCFFFF01AC30240106182B34
63977+:1022200014600120AF8600548F8700609798005C8F
63978+:10223000AF8700400307402310C000C7A788005C99
63979+:102240008F91003030C3000300035823922A007C92
63980+:102250003171000302261021000A20823092000111
63981+:102260000012488000492821311FFFFF03E5C82BD9
63982+:10227000132001208F8800388F8500348F880064F8
63983+:102280001105025A3C0E3F018E0600003C0C250051
63984+:1022900000CE682411AC01638F84004C30E500FF50
63985+:1022A0000E00184A000030218F8800388F870060A8
63986+:1022B0008F8500340A001DB78F8600540A001C5613
63987+:1022C000AF87006490A400D400E48024320200FFB1
63988+:1022D000104000169386005990A6008890AE00D753
63989+:1022E00024A8008830D4003F2686FFE02CD10020AF
63990+:1022F000A38E003C1220000CAF88004C240B000180
63991+:1023000000CB20043095001916A0012B3C0680005C
63992+:1023100034CF0002008FC0241700022E3099002015
63993+:1023200017200234000000009386005930CB0001D2
63994+:102330001160000F9788005C8CBF00848CA900841A
63995+:10234000240AFF8003EA6024000C19403132007F28
63996+:10235000007238253C0D200000EDC82530D800FE65
63997+:102360003C0F8000ADF90830A39800599788005CB5
63998+:102370001500FF84000000008E630020306200041E
63999+:102380001040FF51938600592404FFFB0064802411
64000+:102390003C038000AE700020346601808C7301B86D
64001+:1023A0000660FFFE8F98006C347501003C1400013C
64002+:1023B000ACD800008C6B012424076085ACCB0004F2
64003+:1023C0008EAE000401D488245220000124076083CB
64004+:1023D00024190002A4C700083C0F1000A0D9000B6C
64005+:1023E0003C068000ACCF01B80A001C2B9386005934
64006+:1023F00030A500FF0E00184A240600018F88006CEB
64007+:102400003C05800034A90900250201889388004812
64008+:10241000304A0007304B00783C0340802407FF809F
64009+:102420000163C825014980210047F824310C00FFD1
64010+:1024300024060034ACBF0800AF90004CACB90810C3
64011+:102440005586FF6E920400048F8400388E11003090
64012+:10245000908E00D431CD001015A000108F83006045
64013+:102460002C6F000515E000E400000000909800D4F7
64014+:102470002465FFFC331200101640000830A400FF52
64015+:102480008F9F00648F99003413F90004388700018E
64016+:1024900030E20001144001C8000000000E001BC320
64017+:1024A000000000000A001DF8000000008F84006496
64018+:1024B00030C500FF0E00184A24060001939800481A
64019+:1024C000240B0034130B00A08F8500388F8600602A
64020+:1024D0009783005C306EFFFF00CE8823AF910060D1
64021+:1024E000A780005C1280FF90028018212414FFFD59
64022+:1024F0005474FFA28E6300208E6A00042403FFBF81
64023+:102500002408FFEF0155F823AE7F000490AC00D4FF
64024+:102510003189007FA0A900D48E7200208F8F0038EF
64025+:10252000A780005C364D0002AE6D0020A5E000BC27
64026+:1025300091E500D400A3C824A1F900D48F950038F8
64027+:10254000AEA000EC92B800D403085824A2AB00D48B
64028+:102550000A001CD78F8500388F910034AF8000604F
64029+:1025600002275821AF8B0034000020212403FFFFF5
64030+:10257000108301B48F8500388E0C00103C0D0800CC
64031+:102580008DAD31B09208000031843FFF008D802B6B
64032+:1025900012000023310D003F3C1908008F3931A88B
64033+:1025A0008F9F006C000479802408FF80033F202166
64034+:1025B000008FC821938500590328F8243C06008029
64035+:1025C0003C0F800034D80001001F91403331007F60
64036+:1025D0008F8600380251502535EE0940332B0078A4
64037+:1025E000333000073C0310003C02800C017890253A
64038+:1025F000020E48210143C0250222382134AE0001D9
64039+:10260000ADFF0804AF890050ADF20814AF87004455
64040+:10261000ADFF0028ACD90084ADF80830A38E005976
64041+:102620009383005A24070003106700272407000142
64042+:102630001467FFAC8F8500382411002311B1008589
64043+:1026400000000000240E000B026020210E001A4A38
64044+:10265000A38E00680040A0210A001D328F8500383B
64045+:1026600002602021240B000C0E001A4AA38B006884
64046+:10267000240AFFFF104AFFBD2404FFFF8F8E00389D
64047+:10268000A380003C8E0D00048DC800D83C0600FFDE
64048+:1026900034CCFFFF01AC30240106182B1060FEE2A1
64049+:1026A000AF86005402602021241200190E001A4A3D
64050+:1026B000A3920068240FFFFF104FFFAC2404FFFF1C
64051+:1026C0000A001C838F86005425A3FFE02C74002091
64052+:1026D0001280FFDD240E000B000328803C1108014E
64053+:1026E000263194B400B148218D2D000001A00008CE
64054+:1026F000000000008F85003400A710219385003C66
64055+:10270000AF82003402251821A383003C951F00BC32
64056+:102710000226282137F91000A51900BC5240FF926B
64057+:10272000AF850060246A0004A38A003C950900BCC0
64058+:1027300024A40004AF84006035322000A51200BC40
64059+:102740000A001D54000020218F8600602CC800055F
64060+:102750001500FF609783005C3065FFFF00C5C8234C
64061+:102760002F2F000511E00003306400FF24CDFFFC93
64062+:1027700031A400FF8F8900648F920034113200046D
64063+:10278000389F000133EC0001158001380000000083
64064+:102790008F840038908700D434E60010A08600D4DF
64065+:1027A0008F8500388F8600609783005CACA000ECBA
64066+:1027B0000A001D2F306EFFFF8CB500848CB400849E
64067+:1027C0003C04100002A7302400068940328E007FAE
64068+:1027D000022E8025020410253C08800024050001FB
64069+:1027E00002602021240600010E0019A9AD02083064
64070+:1027F0000A001CC38F8500388C8200EC1222FE7EFA
64071+:102800000260202124090005A38900680E001A4AED
64072+:102810002411FFFF1451FE782404FFFF0A001D5508
64073+:102820002403FFFF8F8F004C8F8800388DF8000045
64074+:10283000AD1800888DE70010AD0700988F87006005
64075+:102840000A001DB78F8600542406FFFF118600057D
64076+:10285000000000000E001B4C026020210A001D8FAA
64077+:102860000040A0210E001AD1026020210A001D8F15
64078+:102870000040A0218F90004C3C0208008C4231B0F7
64079+:102880008E110010322C3FFF0182282B10A0000C6B
64080+:10289000240BFF808F85005090A3000D01637024EE
64081+:1028A00031CA00FF1140000702602021001143825D
64082+:1028B000310600032418000110D8010600000000B2
64083+:1028C000026020212403000D0E001A4AA383006831
64084+:1028D000004020218F8500380A001D320080A02191
64085+:1028E0008F90004C3C0A08008D4A31B08F85005013
64086+:1028F0008E0400100000A0218CB1001430823FFF34
64087+:10290000004A602B8CB200205180FFEE0260202133
64088+:1029100090B8000D240BFF800178702431C300FFB4
64089+:102920005060FFE80260202100044382310600036A
64090+:1029300014C0FFE40260202194BF001C8F9900386E
64091+:102940008E060028A73F00E88CAF0010022F20233E
64092+:1029500014C4013A026020218F83005400C368210F
64093+:10296000022D382B14E00136240200188F8A00440F
64094+:102970008F820030024390218D4B00100163702341
64095+:10298000AD4E0010AD5200208C4C00740192282BEB
64096+:1029900014A0015F026020218F8400508E08002463
64097+:1029A0008C86002411060007026020212419001CD7
64098+:1029B0000E001A4AA3990068240FFFFF104FFFC5AD
64099+:1029C0002404FFFF8F8400448C87002424FF00012F
64100+:1029D000AC9F00241251012F8F8D00308DB10074F7
64101+:1029E0001232012C3C0B00808E0E000001CB5024D3
64102+:1029F00015400075000000008E0300142411FFFF35
64103+:102A0000107100073C0808003C0608008CC6319095
64104+:102A100000C8C0241300015202602021A380006876
64105+:102A20008E0300003C19000100792024108000135F
64106+:102A30003C1F0080007FA02416800009020028218E
64107+:102A4000026020212411001A0E001A4AA391006886
64108+:102A50002407FFFF1047FF9F2404FFFF02002821E7
64109+:102A6000026020210E001A6A240600012410FFFFD4
64110+:102A70001050FF982404FFFF241400018F8D0044A0
64111+:102A8000026020210280302195A900342405000134
64112+:102A9000253200010E0019A9A5B200340000202142
64113+:102AA0008F8500380A001D320080A0218F90004CD5
64114+:102AB0003C1408008E9431B08E07001030E53FFFC3
64115+:102AC00000B4C82B132000618F8600502412FF80B1
64116+:102AD00090C9000D0249682431A400FF5080005CB9
64117+:102AE000026020218F8C00541180000700078B8228
64118+:102AF0008F8500388F82FCC094BF0080944A000A02
64119+:102B0000515F00F78F8600403227000314E0006415
64120+:102B100000000000920E000211C000D8000000006A
64121+:102B20008E0B0024156000D902602021920400035E
64122+:102B300024190002308500FF14B90005308900FF18
64123+:102B40008F940054128000EA240D002C308900FF7D
64124+:102B5000392C00102D8400012D3200010244302553
64125+:102B6000020028210E001A6A026020212410FFFFB3
64126+:102B7000105000BF8F8500388F830054106000D341
64127+:102B8000240500013C0A08008D4A318C0143F82BD2
64128+:102B900017E000B22402002D02602021000028214D
64129+:102BA0000E0019A9240600018F85003800001821A5
64130+:102BB0000A001D320060A0210E0018750000000000
64131+:102BC0000A001DF800000000AC8000200A001E78FA
64132+:102BD0008E03001400002821026020210E0019A994
64133+:102BE000240600010A001CC38F8500380A001DB7A7
64134+:102BF0008F8800388CAA00848CAC00843C031000C1
64135+:102C00000147F824001F91403189007F024968255F
64136+:102C100001A32825ACC50830910700012405000157
64137+:102C2000026020210E0019A930E600010A001CC331
64138+:102C30008F850038938F00482403FFFD0A001D3460
64139+:102C4000AF8F00600A001D342403FFFF02602021C3
64140+:102C50002410000D0E001A4AA390006800401821AD
64141+:102C60008F8500380A001D320060A0210E00187503
64142+:102C7000000000009783005C8F86006000402021E8
64143+:102C80003070FFFF00D010232C4A00051140FE11C8
64144+:102C90008F850038ACA400EC0A001D2F306EFFFFBA
64145+:102CA00090CF000D31E300085460FFA192040003AF
64146+:102CB00002602021240200100E001A4AA38200683C
64147+:102CC0002403FFFF5443FF9A920400030A001F12DB
64148+:102CD0008F85003890A4000D308F000811E000951A
64149+:102CE0008F990054572000A6026020218E1F000CEF
64150+:102CF0008CB4002057F40005026020218E0D0008DE
64151+:102D00008CA7002411A7003A026020212402002091
64152+:102D1000A38200680E001A4A2412FFFF1052FEED33
64153+:102D20002404FFFF8F9F00442402FFF73C14800E11
64154+:102D300093EA000D2419FF803C03800001423824EF
64155+:102D4000A3E7000D8F9F00303C0908008D2931ACAE
64156+:102D50008F8C006C97F200788F870044012C302113
64157+:102D6000324D7FFF000D204000C4782131E5007F07
64158+:102D700000B4C02101F94024AC68002CA711000068
64159+:102D80008CEB0028256E0001ACEE00288CEA002CAC
64160+:102D90008E02002C01426021ACEC002C8E09002C2C
64161+:102DA000ACE900308E120014ACF2003494ED003A1D
64162+:102DB00025A40001A4E4003A97E600783C1108003D
64163+:102DC0008E3131B024C3000130707FFF1211005CDE
64164+:102DD000006030218F8F0030026020212405000127
64165+:102DE0000E001934A5E600780A001EA1000020217B
64166+:102DF0008E0900142412FFFF1132006B8F8A0038F5
64167+:102E00008E0200188D4C00D0144C00650260202109
64168+:102E10008E0B00248CAE0028116E005B2402002172
64169+:102E20000E001A4AA38200681452FFBE2404FFFF5A
64170+:102E30008F8500380A001D320080A0212402001F67
64171+:102E40000E001A4AA38200682409FFFF1049FEA160
64172+:102E50002404FFFF0A001E548F83005402602021C7
64173+:102E60000E001A4AA38200681450FF508F85003864
64174+:102E70002403FFFF0A001D320060A0218CD800242B
64175+:102E80008E0800241118FF29026020210A001F2744
64176+:102E90002402000F8E0900003C05008001259024CB
64177+:102EA0001640FF492402001A026020210E001A4A2F
64178+:102EB000A3820068240CFFFF144CFECF2404FFFF04
64179+:102EC0008F8500380A001D320080A0210E001934C1
64180+:102ED000026020218F8500380A001EE500001821BD
64181+:102EE0002403FFFD0060A0210A001D32AF860060B0
64182+:102EF000026020210E001A4AA38D00682403FFFF00
64183+:102F00001043FF588F8500380A001ECC920400033E
64184+:102F10002418001D0E001A4AA39800682403FFFF1E
64185+:102F20001443FE9D2404FFFF8F8500380A001D32E4
64186+:102F30000080A021026020210A001F3D24020024FD
64187+:102F4000240880000068C024330BFFFF000B73C20D
64188+:102F500031D000FF001088270A001F6E001133C017
64189+:102F6000240F001B0E001A4AA38F00681451FEACF8
64190+:102F70002404FFFF8F8500380A001D320080A02145
64191+:102F80000A001F3D240200278E0600288CA3002C77
64192+:102F900010C30008026020210A001F812402001FC4
64193+:102FA0000A001F812402000E026020210A001F81F6
64194+:102FB000240200258E04002C1080000D8F8F00301D
64195+:102FC0008DE800740104C02B5700000C0260202122
64196+:102FD0008CB900140086A0210334282B10A0FF52C6
64197+:102FE0008F9F0044026020210A001F8124020022DA
64198+:102FF000026020210A001F81240200230A001F8191
64199+:103000002402002627BDFFD8AFB3001CAFB10014C7
64200+:10301000AFBF0020AFB20018AFB000103C0280007C
64201+:103020008C5201408C4B01483C048000000B8C0208
64202+:10303000322300FF317300FF8C8501B804A0FFFE2E
64203+:1030400034900180AE1200008C8701442464FFF0AC
64204+:10305000240600022C830013AE070004A61100080A
64205+:10306000A206000BAE1300241060004F8FBF00209B
64206+:10307000000448803C0A0801254A9534012A402171
64207+:103080008D04000000800008000000003C030800E0
64208+:103090008C6331A831693FFF00099980007280215B
64209+:1030A000021370212405FF80264D0100264C00806C
64210+:1030B0003C02800031B1007F3198007F31CA007F2F
64211+:1030C0003C1F800A3C1980043C0F800C01C5202461
64212+:1030D00001A5302401853824014F1821AC46002475
64213+:1030E000023F402103194821AC470090AC4400281E
64214+:1030F000AF830044AF880038AF8900300E0019005C
64215+:10310000016080213C0380008C6B01B80560FFFEEC
64216+:103110008F8700448F8600383465018090E8000D69
64217+:10312000ACB20000A4B0000600082600000416039C
64218+:1031300000029027001227C21080008124C200885C
64219+:10314000241F6082A4BF0008A0A000052402000282
64220+:10315000A0A2000B8F8B0030000424003C08270045
64221+:1031600000889025ACB20010ACA00014ACA00024E4
64222+:10317000ACA00028ACA0002C8D6900382413FF807F
64223+:10318000ACA9001890E3000D02638024320500FF13
64224+:1031900010A000058FBF002090ED000D31AC007F26
64225+:1031A000A0EC000D8FBF00208FB3001C8FB2001861
64226+:1031B0008FB100148FB000103C0A10003C0E80004C
64227+:1031C00027BD002803E00008ADCA01B8265F010052
64228+:1031D0002405FF8033F8007F3C06800003E5782457
64229+:1031E0003C19800A03192021ACCF0024908E00D412
64230+:1031F00000AE682431AC00FF11800024AF84003899
64231+:10320000248E008895CD00123C0C08008D8C31A8CE
64232+:1032100031AB3FFF01924821000B5180012A402130
64233+:1032200001052024ACC400283107007F3C06800C37
64234+:1032300000E620219083000D00A31024304500FFFC
64235+:1032400010A0FFD8AF8400449098000D330F0010F9
64236+:1032500015E0FFD58FBF00200E0019000000000010
64237+:103260003C0380008C7901B80720FFFE00000000BD
64238+:10327000AE1200008C7F0144AE1F0004A6110008AE
64239+:1032800024110002A211000BAE1300243C1308010C
64240+:10329000927396F0327000015200FFC38FBF00207E
64241+:1032A0000E002146024020210A0020638FBF00202B
64242+:1032B0003C1260008E452C083C03F0033462FFFF93
64243+:1032C00000A2F824AE5F2C088E582C083C1901C0CF
64244+:1032D00003199825AE532C080A0020638FBF0020E5
64245+:1032E000264D010031AF007F3C10800A240EFF8084
64246+:1032F00001F0282101AE60243C0B8000AD6C00245D
64247+:103300001660FFA8AF85003824110003A0B100FCAF
64248+:103310000A0020638FBF002026480100310A007F89
64249+:103320003C0B800A2409FF80014B30210109202435
64250+:103330003C078000ACE400240A002062AF8600381D
64251+:10334000944E0012320C3FFF31CD3FFF15ACFF7D94
64252+:10335000241F608290D900D42418FF800319782498
64253+:1033600031EA00FF1140FF7700000000240700044D
64254+:10337000A0C700FC8F870044241160842406000D40
64255+:10338000A4B10008A0A600050A00204D24020002F6
64256+:103390003C040001248496DC24030014240200FE73
64257+:1033A0003C010800AC2431EC3C010800AC2331E8BE
64258+:1033B0003C010801A42296F83C040801248496F8F4
64259+:1033C0000000182100643021A0C300042463000120
64260+:1033D0002C6500FF54A0FFFC006430213C0708006E
64261+:1033E00024E7010003E00008AF87007800A058211F
64262+:1033F000008048210000102114A00012000050217C
64263+:103400000A002142000000003C010801A42096F8B7
64264+:103410003C05080194A596F88F8200783C0C0801C1
64265+:10342000258C96F800E2182100AC2021014B302BAE
64266+:10343000A089000400001021A460000810C0003919
64267+:10344000010048218F8600780009384000E94021BA
64268+:103450000008388000E6282190A8000B90B9000AE7
64269+:103460000008204000881021000218800066C0215A
64270+:10347000A319000A8F85007800E5782191EE000AF3
64271+:1034800091E6000B000E684001AE6021000C208028
64272+:1034900000851021A046000B3C030801906396F2C2
64273+:1034A000106000222462FFFF8F8300383C01080176
64274+:1034B000A02296F2906C00FF118000040000000032
64275+:1034C000906E00FF25CDFFFFA06D00FF3C190801A5
64276+:1034D000973996F8272300013078FFFF2F0F00FF60
64277+:1034E00011E0FFC9254A00013C010801A42396F818
64278+:1034F0003C05080194A596F88F8200783C0C0801E1
64279+:10350000258C96F800E2182100AC2021014B302BCD
64280+:10351000A089000400001021A460000814C0FFC9A5
64281+:103520000100482103E000080000000003E000085B
64282+:103530002402000227BDFFE0248501002407FF804C
64283+:10354000AFB00010AFBF0018AFB1001400A718242F
64284+:103550003C10800030A4007F3C06800A00862821B1
64285+:103560008E110024AE03002490A200FF1440000836
64286+:10357000AF850038A0A000098FBF0018AE1100244D
64287+:103580008FB100148FB0001003E0000827BD0020A9
64288+:1035900090A900FD90A800FF312400FF0E0020F448
64289+:1035A000310500FF8F8500388FBF0018A0A00009EB
64290+:1035B000AE1100248FB100148FB0001003E000089A
64291+:1035C00027BD002027BDFFD0AFB20020AFB1001C47
64292+:1035D000AFB00018AFBF002CAFB40028AFB30024C9
64293+:1035E0003C0980009533011635320C00952F011AE5
64294+:1035F0003271FFFF023280218E08000431EEFFFF9E
64295+:10360000248B0100010E6821240CFF8025A5FFFFFB
64296+:10361000016C50243166007F3C07800AAD2A0024EB
64297+:1036200000C73021AF850074AF8800703C010801ED
64298+:10363000A02096F190C300090200D02100809821BB
64299+:10364000306300FF2862000510400048AF86003854
64300+:10365000286400021480008E24140001240D00054B
64301+:103660003C010801A02D96D590CC00FD3C0108013D
64302+:10367000A02096D63C010801A02096D790CB000A46
64303+:10368000240AFF80318500FF014B4824312700FFC9
64304+:1036900010E0000C000058213C12800836510080D8
64305+:1036A0008E2F00308CD0005C01F0702305C0018E9D
64306+:1036B0008F87007090D4000A3284007FA0C4000A73
64307+:1036C0008F8600383C118008363000808E0F003025
64308+:1036D0008F87007000EF702319C000EE000000001B
64309+:1036E00090D4000924120002328400FF1092024795
64310+:1036F000000000008CC2005800E2F82327F9FFFF09
64311+:103700001B2001300000000090C5000924080004BF
64312+:1037100030A300FF10680057240A00013C01080193
64313+:10372000A02A96D590C900FF252700013C01080179
64314+:10373000A02796D43C030801906396D52406000583
64315+:103740001066006A2C780005130000C40000902168
64316+:103750000003F8803C0408012484958003E4C82118
64317+:103760008F25000000A0000800000000241800FFC2
64318+:103770001078005C0000000090CC000A90CA00099C
64319+:103780003C080801910896F13187008000EA48253D
64320+:103790003C010801A02996DC90C500FD3C140801FD
64321+:1037A000929496F2311100013C010801A02596DDAA
64322+:1037B00090DF00FE3C010801A03F96DE90D200FFA2
64323+:1037C0003C010801A03296DF8CD900543C0108016D
64324+:1037D000AC3996E08CD000583C010801AC3096E43E
64325+:1037E0008CC3005C3C010801AC3496EC3C01080140
64326+:1037F000AC2396E8162000088FBF002C8FB4002859
64327+:103800008FB300248FB200208FB1001C8FB000183E
64328+:1038100003E0000827BD00303C1180009624010E13
64329+:103820000E000FD43094FFFF3C0B08018D6B96F413
64330+:103830000260382102802821AE2B01803C13080150
64331+:103840008E7396D401602021240600830E00102F71
64332+:10385000AFB300108FBF002C8FB400288FB30024AB
64333+:103860008FB200208FB1001C8FB0001803E0000859
64334+:1038700027BD00303C1808008F1831FC270F0001CD
64335+:103880003C010800AC2F31FC0A0021D700000000E9
64336+:103890001474FFB900000000A0C000FF3C05080040
64337+:1038A0008CA531E43C0308008C6331E03C02080045
64338+:1038B0008C4232048F99003834A80001241F000282
64339+:1038C0003C010801AC2396F43C010801A02896F0C5
64340+:1038D0003C010801A02296F3A33F00090A002190B1
64341+:1038E0008F8600380E002146000000000A0021D714
64342+:1038F0008F8600383C1F080193FF96D424190001DD
64343+:1039000013F902298F8700703C100801921096D895
64344+:103910003C06080190C696D610C000050200A02102
64345+:103920003C040801908496D9109001E48F870078B8
64346+:10393000001088408F9F0078023048210009C8801D
64347+:10394000033F702195D80008270F0001A5CF00087C
64348+:103950003C040801908496D93C05080190A596D6B0
64349+:103960000E0020F4000000008F8700780230202134
64350+:103970000004308000C720218C8500048F820074F1
64351+:1039800000A2402305020006AC8200048C8A0000DD
64352+:103990008F830070014310235C400001AC83000062
64353+:1039A0008F86003890CB00FF2D6C00025580002DD3
64354+:1039B000241400010230F821001F40800107282153
64355+:1039C00090B9000B8CAE00040019C0400319782197
64356+:1039D000000F1880006710218C4D000001AE882375
64357+:1039E0002630FFFF5E00001F241400018C440004F9
64358+:1039F0008CAA0000008A482319200019240E000414
64359+:103A00003C010801A02E96D590AD000B8CAB0004B4
64360+:103A1000000D8840022D80210010108000471021E9
64361+:103A20008C44000401646023058202009443000872
64362+:103A300090DF00FE90B9000B33E500FF54B900049D
64363+:103A40000107A021A0D400FE8F8700780107A021E4
64364+:103A50009284000B0E0020F4240500018F860038AC
64365+:103A600024140001125400962E500001160000424A
64366+:103A70003C08FFFF241900021659FF3F0000000018
64367+:103A8000A0C000FF8F860038A0D200090A0021D70D
64368+:103A90008F86003890C700092404000230E300FF3D
64369+:103AA0001064016F24090004106901528F880074AA
64370+:103AB0008CCE0054010E682325B10001062001754B
64371+:103AC000241800043C010801A03896D53C010801E7
64372+:103AD000A02096D490D400FD90D200FF2E4F00027B
64373+:103AE00015E0FF14328400FF000438408F8900780D
64374+:103AF00090DF00FF00E41021000220800089C8212F
64375+:103B00002FE500029324000B14A0FF0A24070002F3
64376+:103B100000041840006480210010588001692821A9
64377+:103B20008CAC0004010C50230540FF020000000093
64378+:103B30003C030801906396D614600005246F0001D1
64379+:103B40003C010801A02496D93C010801A02796D782
64380+:103B50003C010801A02F96D690CE00FF24E700017B
64381+:103B600031CD00FF01A7882B1220FFE990A4000BA4
64382+:103B70000A0021C6000000003C0508018CA596D46F
64383+:103B80003C12000400A8F82413F2000624020005E9
64384+:103B90003C090801912996D5152000022402000352
64385+:103BA000240200053C010801A02296F190C700FF05
64386+:103BB00014E0012024020002A0C200090A0021D75B
64387+:103BC0008F86003890CC00FF1180FEDA240A0001B5
64388+:103BD0008F8C00748F890078240F00030180682186
64389+:103BE0001160001E240E0002000540400105A021C6
64390+:103BF00000142080008990218E51000401918023BF
64391+:103C00000600FECC000000003C020801904296D65F
64392+:103C100014400005245800013C010801A02A96D751
64393+:103C20003C010801A02596D93C010801A03896D690
64394+:103C300090DF00FF010510210002C88033E500FF7E
64395+:103C4000254A00010329202100AA402B1500FEB9B6
64396+:103C50009085000B1560FFE50005404000054040E1
64397+:103C600001051821000310803C010801A02A96D408
64398+:103C70003C010801A02596D8004918218C64000455
64399+:103C800000E4F82327F9FFFF1F20FFE900000000F0
64400+:103C90008C63000000E358230560013A01A38823E8
64401+:103CA00010E301170184C0231B00FEA200000000E6
64402+:103CB0003C010801A02E96D50A002305240B000123
64403+:103CC000240E0004A0CE00093C0D08008DAD31F893
64404+:103CD0008F86003825A200013C010800AC2231F893
64405+:103CE0000A0021D7000000008CD9005C00F9C02335
64406+:103CF0001F00FE7B000000008CDF005C10FFFF65F2
64407+:103D00008F8400748CC3005C008340232502000173
64408+:103D10001C40FF60000000008CC9005C248700018B
64409+:103D200000E9282B10A0FE943C0D80008DAB01040F
64410+:103D30003C0C0001016C50241140FE8F2402001045
64411+:103D40003C010801A02296F10A0021D700000000E2
64412+:103D50008F9100748F86003826220001ACC2005C6F
64413+:103D60000A002292241400018F8700382404FF8067
64414+:103D70000000882190E9000A241400010124302564
64415+:103D8000A0E6000A3C05080190A596D63C0408016F
64416+:103D9000908496D90E0020F4000000008F86003831
64417+:103DA0008F85007890C800FD310700FF0007404074
64418+:103DB0000107F821001FC0800305C8219323000BD1
64419+:103DC000A0C300FD8F8500788F8600380305602131
64420+:103DD000918F000B000F704001CF6821000D808093
64421+:103DE000020510218C4B0000ACCB00548D840004E4
64422+:103DF0008F83007400645023194000022482000164
64423+:103E00002462000101074821ACC2005C0009308037
64424+:103E100000C5402100E02021240500010E0020F40F
64425+:103E20009110000B8F86003890C500FF10A0FF0C8A
64426+:103E3000001070408F85007801D06821000D10803F
64427+:103E4000004558218D6400008F8C0074018450233C
64428+:103E50002547000104E0FF02263100013C03080170
64429+:103E6000906396D62E2F0002247800013C010801B1
64430+:103E7000A03896D63C010801A03496D711E0FEF890
64431+:103E8000020038210A002365000740408F84003873
64432+:103E90008F8300748C85005800A340230502FE9A8E
64433+:103EA000AC8300580A00223B000000003C070801D8
64434+:103EB00090E796F2240200FF10E200BE8F860038E1
64435+:103EC0003C110801963196FA3C030801246396F8E8
64436+:103ED000262500013230FFFF30ABFFFF02036021D7
64437+:103EE0002D6A00FF1540008D918700043C010801F8
64438+:103EF000A42096FA8F88003800074840012728211F
64439+:103F0000911800FF000530802405000127140001EE
64440+:103F1000A11400FF3C120801925296F28F8800789B
64441+:103F20008F8E0070264F000100C820213C0108013F
64442+:103F3000A02F96F2AC8E00008F8D0074A48500082F
64443+:103F4000AC8D00043C030801906396D414600077A4
64444+:103F5000000090213C010801A02596D4A087000B09
64445+:103F60008F8C007800CC5021A147000A8F82003846
64446+:103F7000A04700FD8F840038A08700FE8F860038A0
64447+:103F80008F9F0070ACDF00548F990074ACD900583B
64448+:103F90008F8D00780127C02100185880016DA02165
64449+:103FA000928F000A000F704001CF18210003888013
64450+:103FB000022D8021A207000B8F8600780166602108
64451+:103FC000918A000B000A1040004A2021000428803A
64452+:103FD00000A64021A107000A3C07800834E90080C0
64453+:103FE0008D2200308F860038ACC2005C0A0022921D
64454+:103FF0002414000190CA00FF1540FEAD8F880074A4
64455+:10400000A0C400090A0021D78F860038A0C000FD97
64456+:104010008F98003824060001A30000FE3C0108012F
64457+:10402000A02696D53C010801A02096D40A0021C6FE
64458+:104030000000000090CB00FF3C040801908496F340
64459+:10404000316C00FF0184502B1540000F2402000347
64460+:1040500024020004A0C200090A0021D78F8600387C
64461+:1040600090C3000A2410FF8002035824316C00FF23
64462+:104070001180FDC1000000003C010801A02096D580
64463+:104080000A0021C600000000A0C200090A0021D7D2
64464+:104090008F86003890D4000A2412FF8002544824EE
64465+:1040A000312800FF1500FFF4240200083C0108013C
64466+:1040B000A02296F10A0021D70000000000108840DD
64467+:1040C0008F8B0070023018210003688001A7202127
64468+:1040D000AC8B00008F8A0074240C0001A48C0008B3
64469+:1040E000AC8A00043C05080190A596D62402000184
64470+:1040F00010A2FE1E24A5FFFF0A0022519084000B8F
64471+:104100000184A0231A80FD8B000000003C010801FF
64472+:10411000A02E96D50A002305240B00013C010801BE
64473+:10412000A42596FA0A0023B78F880038240B0001D3
64474+:10413000106B00228F9800388F85003890BF00FFE9
64475+:1041400033F900FF1079002B000000003C1F08012C
64476+:1041500093FF96D8001FC840033FC0210018A080DD
64477+:104160000288782191EE000AA08E000A8F8D0078D7
64478+:104170003C030801906396D800CD88210A0023DD16
64479+:10418000A223000B263000010600003101A4902379
64480+:104190000640002B240200033C010801A02F96D505
64481+:1041A0000A002305240B00018F8900380A00223BF6
64482+:1041B000AD2700540A00229124120001931400FD3F
64483+:1041C000A094000B8F8800388F8F0078910E00FE2E
64484+:1041D00000CF6821A1AE000A8F910038A22700FD10
64485+:1041E0008F8300708F900038AE0300540A0023DEE6
64486+:1041F0008F8D007890B000FEA090000A8F8B003861
64487+:104200008F8C0078916A00FD00CC1021A04A000B31
64488+:104210008F840038A08700FE8F8600748F85003859
64489+:10422000ACA600580A0023DE8F8D007894B80008F1
64490+:10423000ACA40004030378210A002285A4AF00087F
64491+:104240003C010801A02296D50A0021C6000000000A
64492+:1042500090CF0009240D000431EE00FF11CDFD8543
64493+:10426000240200013C010801A02296D50A0021C6C3
64494+:1042700000000000080033440800334408003420E4
64495+:10428000080033F4080033D8080033280800332826
64496+:10429000080033280800334C8008010080080080A3
64497+:1042A000800800005F865437E4AC62CC50103A4579
64498+:1042B00036621985BF14C0E81BC27A1E84F4B55655
64499+:1042C000094EA6FE7DDA01E7C04D748108005A74DC
64500+:1042D00008005AB808005A5C08005A5C08005A5C8A
64501+:1042E00008005A5C08005A7408005A5C08005A5CBE
64502+:1042F00008005AC008005A5C080059D408005A5CEB
64503+:1043000008005A5C08005AC008005A5C08005A5C51
64504+:1043100008005A5C08005A5C08005A5C08005A5CA5
64505+:1043200008005A5C08005A5C08005A5C08005A5C95
64506+:1043300008005A9408005A5C08005A9408005A5C15
64507+:1043400008005A5C08005A5C08005A9808005A9401
64508+:1043500008005A5C08005A5C08005A5C08005A5C65
64509+:1043600008005A5C08005A5C08005A5C08005A5C55
64510+:1043700008005A5C08005A5C08005A5C08005A5C45
64511+:1043800008005A5C08005A5C08005A5C08005A5C35
64512+:1043900008005A5C08005A5C08005A5C08005A5C25
64513+:1043A00008005A9808005A9808005A5C08005A9861
64514+:1043B00008005A5C08005A5C08005A5C08005A5C05
64515+:1043C00008005A5C08005A5C08005A5C08005A5CF5
64516+:1043D00008005A5C08005A5C08005A5C08005A5CE5
64517+:1043E00008005A5C08005A5C08005A5C08005A5CD5
64518+:1043F00008005A5C08005A5C08005A5C08005A5CC5
64519+:1044000008005A5C08005A5C08005A5C08005A5CB4
64520+:1044100008005A5C08005A5C08005A5C08005A5CA4
64521+:1044200008005A5C08005A5C08005A5C08005A5C94
64522+:1044300008005A5C08005A5C08005A5C08005A5C84
64523+:1044400008005A5C08005A5C08005A5C08005A5C74
64524+:1044500008005A5C08005A5C08005A5C08005A5C64
64525+:1044600008005A5C08005A5C08005A5C08005A5C54
64526+:1044700008005A5C08005A5C08005A5C08005A5C44
64527+:1044800008005A5C08005A5C08005A5C08005A5C34
64528+:1044900008005A5C08005A5C08005A5C08005A5C24
64529+:1044A00008005A5C08005A5C08005A5C08005A5C14
64530+:1044B00008005A5C08005A5C08005A5C08005A5C04
64531+:1044C00008005A5C08005A5C08005A5C08005ADC74
64532+:1044D0000800782C08007A900800783808007628C0
64533+:1044E00008007838080078C4080078380800762872
64534+:1044F0000800762808007628080076280800762824
64535+:104500000800762808007628080076280800762813
64536+:1045100008007628080078580800784808007628AF
64537+:1045200008007628080076280800762808007628F3
64538+:1045300008007628080076280800762808007628E3
64539+:1045400008007628080076280800762808007848B1
64540+:10455000080082FC08008188080082C40800818865
64541+:104560000800829408008070080081880800818813
64542+:1045700008008188080081880800818808008188F7
64543+:1045800008008188080081880800818808008188E7
64544+:104590000800818808008188080081B008008D34F7
64545+:1045A00008008E9008008E70080088D808008D4C96
64546+:1045B0000A00012400000000000000000000000DBF
64547+:1045C000747061362E322E31620000000602010145
64548+:1045D00000000000000000000000000000000000DB
64549+:1045E00000000000000000000000000000000000CB
64550+:1045F00000000000000000000000000000000000BB
64551+:1046000000000000000000000000000000000000AA
64552+:10461000000000000000000000000000000000009A
64553+:10462000000000000000000000000000000000008A
64554+:10463000000000000000000000000000000000007A
64555+:104640000000000010000003000000000000000D4A
64556+:104650000000000D3C020800244217203C03080023
64557+:1046600024632A10AC4000000043202B1480FFFD7F
64558+:10467000244200043C1D080037BD2FFC03A0F0219C
64559+:104680003C100800261004903C1C0800279C1720B2
64560+:104690000E000262000000000000000D2402FF80F6
64561+:1046A00027BDFFE000821024AFB00010AF42002011
64562+:1046B000AFBF0018AFB10014936500043084007FD1
64563+:1046C000034418213C0200080062182130A5002094
64564+:1046D000036080213C080111277B000814A0000220
64565+:1046E0002466005C2466005892020004974301048B
64566+:1046F000920400043047000F3063FFFF3084004015
64567+:10470000006728231080000900004821920200055C
64568+:1047100030420004104000050000000010A000031B
64569+:104720000000000024A5FFFC2409000492020005FB
64570+:1047300030420004104000120000000010A00010E1
64571+:10474000000000009602000200A72021010440257D
64572+:104750002442FFFEA7421016920300042402FF80A9
64573+:1047600000431024304200FF104000033C020400CC
64574+:104770000A000174010240258CC20000AF421018EB
64575+:104780008F4201780440FFFE2402000AA742014044
64576+:1047900096020002240400093042000700021023A0
64577+:1047A00030420007A7420142960200022442FFFE67
64578+:1047B000A7420144A740014697420104A74201488D
64579+:1047C0008F420108304200205040000124040001C3
64580+:1047D00092020004304200101440000234830010A2
64581+:1047E00000801821A743014A0000000000000000DB
64582+:1047F0000000000000000000AF48100000000000B2
64583+:104800000000000000000000000000008F421000C7
64584+:104810000441FFFE3102FFFF1040000700000000CE
64585+:1048200092020004304200401440000300000000E7
64586+:104830008F421018ACC20000960200063042FFFF03
64587+:10484000244200020002104300021040036288214B
64588+:10485000962200001120000D3044FFFF00A7102118
64589+:104860008F8300388F45101C0002108200021080D8
64590+:1048700000431021AC45000030A6FFFF0E00058D5F
64591+:1048800000052C0200402021A62200009203000413
64592+:104890002402FF8000431024304200FF1040001F1C
64593+:1048A0000000000092020005304200021040001B90
64594+:1048B000000000009742100C2442FFFEA742101691
64595+:1048C000000000003C02040034420030AF421000FF
64596+:1048D00000000000000000000000000000000000D8
64597+:1048E0008F4210000441FFFE000000009742100CB0
64598+:1048F0008F45101C3042FFFF24420030000210821E
64599+:1049000000021080005B1021AC45000030A6FFFFC4
64600+:104910000E00058D00052C02A62200009604000260
64601+:10492000248400080E0001E93084FFFF974401044D
64602+:104930000E0001F73084FFFF8FBF00188FB1001405
64603+:104940008FB000103C02100027BD002003E00008DB
64604+:10495000AF4201783084FFFF308200078F8500244A
64605+:1049600010400002248300073064FFF800A41021E7
64606+:1049700030421FFF03421821247B4000AF850028EE
64607+:10498000AF82002403E00008AF4200843084FFFFC0
64608+:104990003082000F8F85002C8F860034104000027B
64609+:1049A0002483000F3064FFF000A410210046182B70
64610+:1049B000AF8500300046202314600002AF82002C37
64611+:1049C000AF84002C8F82002C340480000342182115
64612+:1049D00000641821AF83003803E00008AF42008074
64613+:1049E0008F820014104000088F8200048F82FFDC49
64614+:1049F000144000058F8200043C02FFBF3442FFFFD9
64615+:104A0000008220248F82000430430006240200022A
64616+:104A10001062000F3C0201012C62000350400005AF
64617+:104A2000240200041060000F3C0200010A00023062
64618+:104A30000000000010620005240200061462000C51
64619+:104A40003C0201110A000229008210253C020011DB
64620+:104A500000821025AF421000240200010A0002303B
64621+:104A6000AF82000C00821025AF421000AF80000C16
64622+:104A700000000000000000000000000003E000084B
64623+:104A8000000000008F82000C1040000400000000B5
64624+:104A90008F4210000441FFFE0000000003E0000808
64625+:104AA000000000008F8200102443F800000231C291
64626+:104AB00024C2FFF02C6303011060000300021042C7
64627+:104AC0000A000257AC8200008F85001800C5102B29
64628+:104AD0001440000B0000182100C5102324470001DA
64629+:104AE0008F82001C00A210212442FFFF0046102BE1
64630+:104AF000544000042402FFFF0A000257AC87000064
64631+:104B00002402FFFF0A000260AC8200008C820000D9
64632+:104B10000002194000621821000318800062182169
64633+:104B2000000318803C0208002442175C0062182130
64634+:104B300003E000080060102127BDFFD8AFBF0020B0
64635+:104B4000AFB1001CAFB000183C0460088C8250006C
64636+:104B50002403FF7F3C066000004310243442380CDD
64637+:104B6000AC8250008CC24C1C3C1A80000002160221
64638+:104B70003042000F10400007AF82001C8CC34C1C59
64639+:104B80003C02001F3442FC0000621824000319C2DA
64640+:104B9000AF8300188F420008275B400034420001B9
64641+:104BA000AF420008AF8000243C02601CAF40008090
64642+:104BB000AF4000848C4500088CC308083402800094
64643+:104BC000034220212402FFF0006218243C020080EE
64644+:104BD0003C010800AC2204203C025709AF84003895
64645+:104BE00014620004AF850034240200010A0002921E
64646+:104BF000AF820014AF8000148F42000038420001E1
64647+:104C0000304200011440FFFC8F8200141040001657
64648+:104C10000000000097420104104000058F8300004F
64649+:104C2000146000072462FFFF0A0002A72C62000A3A
64650+:104C30002C620010504000048F83000024620001A9
64651+:104C4000AF8200008F8300002C62000A1440000332
64652+:104C50002C6200070A0002AEAF80FFDC10400002A9
64653+:104C600024020001AF82FFDC8F4301088F44010062
64654+:104C700030622000AF83000410400008AF840010B1
64655+:104C80003C0208008C42042C244200013C01080034
64656+:104C9000AC22042C0A00058A3C0240003065020068
64657+:104CA00014A0000324020F001482026024020D00ED
64658+:104CB00097420104104002C83C02400030624000AC
64659+:104CC000144000AD8F8200388C4400088F42017878
64660+:104CD0000440FFFE24020800AF42017824020008CD
64661+:104CE000A7420140A7400142974201048F8400047B
64662+:104CF0003051FFFF30820001104000070220802168
64663+:104D00002623FFFE240200023070FFFFA742014667
64664+:104D10000A0002DBA7430148A74001463C02080005
64665+:104D20008C42043C1440000D8F8300103082002020
64666+:104D30001440000224030009240300010060202124
64667+:104D40008F830010240209005062000134840004A3
64668+:104D5000A744014A0A0002F60000000024020F00E6
64669+:104D60001462000530820020144000062403000D68
64670+:104D70000A0002F524030005144000022403000980
64671+:104D800024030001A743014A3C0208008C4204208E
64672+:104D90003C0400480E00020C004420250E000235A1
64673+:104DA000000000008F82000C1040003E0000000058
64674+:104DB0008F4210003C0300200043102410400039B3
64675+:104DC0008F820004304200021040003600000000D4
64676+:104DD000974210141440003300000000974210085E
64677+:104DE0008F8800383042FFFF2442000600021882FC
64678+:104DF0000003388000E83021304300018CC40000FB
64679+:104E000010600004304200030000000D0A00033768
64680+:104E100000E81021544000103084FFFF3C05FFFFE4
64681+:104E200000852024008518260003182B0004102B71
64682+:104E300000431024104000050000000000000000A6
64683+:104E40000000000D00000000240002228CC20000BF
64684+:104E50000A000336004520253883FFFF0003182B86
64685+:104E60000004102B00431024104000050000000037
64686+:104E7000000000000000000D000000002400022BD4
64687+:104E80008CC200003444FFFF00E81021AC44000055
64688+:104E90003C0208008C420430244200013C0108001E
64689+:104EA000AC2204308F6200008F840038AF8200088B
64690+:104EB0008C8300003402FFFF1462000F00001021F9
64691+:104EC0003C0508008CA504543C0408008C84045064
64692+:104ED00000B0282100B0302B008220210086202144
64693+:104EE0003C010800AC2504543C010800AC240450EB
64694+:104EF0000A000580240400088C8200003042010072
64695+:104F00001040000F000010213C0508008CA5044C47
64696+:104F10003C0408008C84044800B0282100B0302BE9
64697+:104F200000822021008620213C010800AC25044C91
64698+:104F30003C010800AC2404480A0005802404000851
64699+:104F40003C0508008CA504443C0408008C84044003
64700+:104F500000B0282100B0302B0082202100862021C3
64701+:104F60003C010800AC2504443C010800AC2404408A
64702+:104F70000A000580240400088F6200088F62000088
64703+:104F800000021602304300F02402003010620005D7
64704+:104F900024020040106200E08F8200200A00058891
64705+:104FA0002442000114A000050000000000000000E1
64706+:104FB0000000000D00000000240002568F4201781E
64707+:104FC0000440FFFE000000000E00023D27A4001078
64708+:104FD0001440000500408021000000000000000D8A
64709+:104FE000000000002400025D8E0200001040000559
64710+:104FF00000000000000000000000000D00000000A4
64711+:10500000240002608F62000C0443000324020001AC
64712+:105010000A00042EAE000000AE0200008F820038AD
64713+:105020008C480008A20000078F65000C8F64000404
64714+:1050300030A3FFFF0004240200852023308200FFFC
64715+:105040000043102124420005000230832CC200815D
64716+:10505000A605000A14400005A20400040000000098
64717+:105060000000000D00000000240002788F85003849
64718+:105070000E0005AB260400148F6200048F43010864
64719+:10508000A60200083C02100000621824106000080C
64720+:105090000000000097420104920300072442FFEC45
64721+:1050A000346300023045FFFF0A0003C3A203000778
64722+:1050B000974201042442FFF03045FFFF96060008A6
64723+:1050C0002CC200135440000592030007920200070F
64724+:1050D00034420001A20200079203000724020001EB
64725+:1050E00010620005240200031062000B8F8200385A
64726+:1050F0000A0003E030C6FFFF8F8200383C04FFFF48
64727+:105100008C43000C0064182400651825AC43000C87
64728+:105110000A0003E030C6FFFF3C04FFFF8C43001091
64729+:105120000064182400651825AC43001030C6FFFF4A
64730+:1051300024C2000200021083A20200058F830038FF
64731+:10514000304200FF00021080004328218CA800009C
64732+:105150008CA2000024030004000217021443001272
64733+:1051600000000000974201043C03FFFF01031824E4
64734+:105170003042FFFF004610232442FFFE006240251C
64735+:10518000ACA8000092030005306200FF000210800E
64736+:1051900000501021904200143042000F00431021B3
64737+:1051A0000A000415A20200068CA400049742010420
64738+:1051B0009603000A3088FFFF3042FFFF00461023AD
64739+:1051C0002442FFD60002140001024025ACA80004CE
64740+:1051D000920200079204000524630028000318834C
64741+:1051E0000064182134420004A2030006A202000752
64742+:1051F0008F8200042403FFFB34420002004310248A
64743+:10520000AF820004920300068F87003800031880E5
64744+:10521000007010218C4400203C02FFF63442FFFF56
64745+:105220000082402400671821AE04000CAC68000C1A
64746+:10523000920500063C03FF7F8E02000C00052880CB
64747+:1052400000B020213463FFFF01033024948800263E
64748+:1052500000A7282100431024AE02000CAC860020D9
64749+:10526000AC880024ACA8001024020010A742014022
64750+:1052700024020002A7400142A7400144A742014680
64751+:10528000974201043C0400082442FFFEA742014863
64752+:10529000240200010E00020CA742014A9603000AF4
64753+:1052A0009202000400431021244200023042000711
64754+:1052B00000021023304200070E000235AE0200103B
64755+:1052C0008F6200003C0308008C6304442404001037
64756+:1052D000AF820008974201043042FFFF2442FFFEE4
64757+:1052E00000403821000237C33C0208008C420440D1
64758+:1052F000006718210067282B004610210045102167
64759+:105300003C010800AC2304443C010800AC220440EA
64760+:105310000A0005150000000014A0000500000000B0
64761+:10532000000000000000000D000000002400030A3F
64762+:105330008F4201780440FFFE000000000E00023D95
64763+:1053400027A4001414400005004080210000000044
64764+:105350000000000D00000000240003118E02000078
64765+:105360005440000692020007000000000000000DFB
64766+:10537000000000002400031C9202000730420004D9
64767+:10538000104000058F8200042403FFFB344200021A
64768+:1053900000431024AF8200048F620004044300081D
64769+:1053A00092020007920200068E03000CAE0000007D
64770+:1053B0000002108000501021AC4300209202000730
64771+:1053C00030420004544000099602000A920200058F
64772+:1053D0003C03000100021080005010218C46001890
64773+:1053E00000C33021AC4600189602000A9206000461
64774+:1053F000277100080220202100C2302124C60005A8
64775+:10540000260500140E0005AB00063082920400064B
64776+:105410008F6500043C027FFF000420800091202162
64777+:105420008C8300043442FFFF00A228240065182169
64778+:10543000AC8300049202000792040005920300046A
64779+:10544000304200041040001496070008308400FF2A
64780+:1054500000042080009120218C86000497420104E2
64781+:105460009605000A306300FF3042FFFF0043102121
64782+:105470000045102130E3FFFF004310232442FFD8F2
64783+:1054800030C6FFFF0002140000C23025AC860004C5
64784+:105490000A0004C992030007308500FF0005288038
64785+:1054A00000B128218CA4000097420104306300FF62
64786+:1054B0003042FFFF00431021004710233C03FFFF51
64787+:1054C000008320243042FFFF00822025ACA400008E
64788+:1054D0009203000724020001106200060000000091
64789+:1054E0002402000310620011000000000A0004EC16
64790+:1054F0008E03001097420104920300049605000AEF
64791+:105500008E24000C00431021004510212442FFF29C
64792+:105510003C03FFFF008320243042FFFF0082202550
64793+:10552000AE24000C0A0004EC8E0300109742010424
64794+:10553000920300049605000A8E24001000431021F7
64795+:10554000004510212442FFEE3C03FFFF008320248E
64796+:105550003042FFFF00822025AE2400108E03001091
64797+:105560002402000AA7420140A74301429603000A11
64798+:10557000920200043C04004000431021A742014471
64799+:10558000A740014697420104A742014824020001B6
64800+:105590000E00020CA742014A0E0002350000000076
64801+:1055A0008F6200009203000400002021AF820008F7
64802+:1055B000974201049606000A3042FFFF006218215C
64803+:1055C000006028213C0308008C6304443C0208006E
64804+:1055D0008C42044000651821004410210065382BDE
64805+:1055E000004710213C010800AC2304443C010800A2
64806+:1055F000AC22044092040004008620212484000A86
64807+:105600003084FFFF0E0001E9000000009744010410
64808+:105610003084FFFF0E0001F7000000003C02100084
64809+:10562000AF4201780A0005878F820020148200278C
64810+:105630003062000697420104104000673C024000BF
64811+:105640003062400010400005000000000000000033
64812+:105650000000000D00000000240004208F420178AB
64813+:105660000440FFFE24020800AF4201782402000833
64814+:10567000A7420140A74001428F82000497430104E2
64815+:1056800030420001104000073070FFFF2603FFFE8C
64816+:1056900024020002A7420146A74301480A00053F31
64817+:1056A0002402000DA74001462402000DA742014A32
64818+:1056B0008F62000024040008AF8200080E0001E998
64819+:1056C000000000000A0005190200202110400042DD
64820+:1056D0003C02400093620000304300F024020010BE
64821+:1056E0001062000524020070106200358F820020D5
64822+:1056F0000A000588244200018F62000097430104DC
64823+:105700003050FFFF3071FFFF8F4201780440FFFEF1
64824+:105710003202000700021023304200072403000A6F
64825+:105720002604FFFEA7430140A7420142A7440144CB
64826+:10573000A7400146A75101488F420108304200208E
64827+:10574000144000022403000924030001A743014A76
64828+:105750000E00020C3C0400400E0002350000000068
64829+:105760003C0708008CE70444021110212442FFFE8C
64830+:105770003C0608008CC604400040182100E3382194
64831+:10578000000010218F65000000E3402B00C2302193
64832+:105790002604000800C830213084FFFFAF850008D0
64833+:1057A0003C010800AC2704443C010800AC2604403E
64834+:1057B0000E0001E9000000000A0005190220202166
64835+:1057C0000E00013B000000008F82002024420001F7
64836+:1057D000AF8200203C024000AF4201380A00029232
64837+:1057E000000000003084FFFF30C6FFFF00052C00E2
64838+:1057F00000A628253882FFFF004510210045282BF0
64839+:105800000045102100021C023042FFFF004310211E
64840+:1058100000021C023042FFFF004310213842FFFF0C
64841+:1058200003E000083042FFFF3084FFFF30A5FFFF98
64842+:1058300000001821108000070000000030820001E5
64843+:105840001040000200042042006518210A0005A152
64844+:105850000005284003E000080060102110C0000689
64845+:1058600024C6FFFF8CA2000024A50004AC82000027
64846+:105870000A0005AB2484000403E0000800000000D7
64847+:1058800010A0000824A3FFFFAC8600000000000069
64848+:10589000000000002402FFFF2463FFFF1462FFFAF0
64849+:1058A0002484000403E00008000000000000000160
64850+:1058B0000A00002A00000000000000000000000DA7
64851+:1058C000747870362E322E3162000000060201001C
64852+:1058D00000000000000001360000EA600000000047
64853+:1058E00000000000000000000000000000000000B8
64854+:1058F00000000000000000000000000000000000A8
64855+:105900000000000000000000000000000000000097
64856+:105910000000001600000000000000000000000071
64857+:105920000000000000000000000000000000000077
64858+:105930000000000000000000000000000000000067
64859+:1059400000000000000000000000138800000000BC
64860+:10595000000005DC00000000000000001000000353
64861+:10596000000000000000000D0000000D3C020800D7
64862+:1059700024423D683C0308002463401CAC40000006
64863+:105980000043202B1480FFFD244200043C1D08002E
64864+:1059900037BD7FFC03A0F0213C100800261000A8B2
64865+:1059A0003C1C0800279C3D680E00044E00000000CF
64866+:1059B0000000000D27BDFFB4AFA10000AFA200049E
64867+:1059C000AFA30008AFA4000CAFA50010AFA6001451
64868+:1059D000AFA70018AFA8001CAFA90020AFAA0024F1
64869+:1059E000AFAB0028AFAC002CAFAD0030AFAE003491
64870+:1059F000AFAF0038AFB8003CAFB90040AFBC004417
64871+:105A0000AFBF00480E000591000000008FBF0048A6
64872+:105A10008FBC00448FB900408FB8003C8FAF003876
64873+:105A20008FAE00348FAD00308FAC002C8FAB0028D0
64874+:105A30008FAA00248FA900208FA8001C8FA7001810
64875+:105A40008FA600148FA500108FA4000C8FA3000850
64876+:105A50008FA200048FA1000027BD004C3C1B6004F6
64877+:105A60008F7A5030377B502803400008AF7A00000F
64878+:105A70008F86003C3C0390003C0280000086282575
64879+:105A800000A32025AC4400203C0380008C6700204C
64880+:105A900004E0FFFE0000000003E00008000000003A
64881+:105AA0000A000070240400018F85003C3C04800043
64882+:105AB0003483000100A3102503E00008AC8200201D
64883+:105AC00003E00008000010213084FFFF30A5FFFF35
64884+:105AD00010800007000018213082000110400002F1
64885+:105AE00000042042006518211480FFFB00052840B7
64886+:105AF00003E000080060102110C000070000000053
64887+:105B00008CA2000024C6FFFF24A50004AC82000084
64888+:105B100014C0FFFB2484000403E000080000000020
64889+:105B200010A0000824A3FFFFAC86000000000000C6
64890+:105B3000000000002402FFFF2463FFFF1462FFFA4D
64891+:105B40002484000403E000080000000090AA003153
64892+:105B50008FAB00108CAC00403C0300FF8D6800044C
64893+:105B6000AD6C00208CAD004400E060213462FFFF8A
64894+:105B7000AD6D00248CA700483C09FF000109C0243A
64895+:105B8000AD6700288CAE004C0182C824031978252B
64896+:105B9000AD6F0004AD6E002C8CAD0038314A00FFB3
64897+:105BA000AD6D001C94A900323128FFFFAD680010D4
64898+:105BB00090A70030A5600002A1600004A16700006A
64899+:105BC00090A30032306200FF0002198210600005CD
64900+:105BD000240500011065000E0000000003E000082D
64901+:105BE000A16A00018CD80028354A0080AD780018E1
64902+:105BF0008CCF0014AD6F00148CCE0030AD6E000859
64903+:105C00008CC4002CA16A000103E00008AD64000C04
64904+:105C10008CCD001CAD6D00188CC90014AD6900144A
64905+:105C20008CC80024AD6800088CC70020AD67000C4C
64906+:105C30008CC200148C8300700043C82B1320000713
64907+:105C4000000000008CC20014144CFFE400000000AF
64908+:105C5000354A008003E00008A16A00018C820070D0
64909+:105C60000A0000E6000000009089003027BDFFF820
64910+:105C70008FA8001CA3A900008FA300003C0DFF808B
64911+:105C800035A2FFFF8CAC002C00625824AFAB0000A3
64912+:105C9000A100000400C05821A7A000028D06000446
64913+:105CA00000A048210167C8218FA500000080502175
64914+:105CB0003C18FF7F032C20263C0E00FF2C8C00019B
64915+:105CC000370FFFFF35CDFFFF3C02FF0000AFC824B8
64916+:105CD00000EDC02400C27824000C1DC003236825F9
64917+:105CE00001F87025AD0D0000AD0E00048D240024D8
64918+:105CF000AFAD0000AD0400088D2C00202404FFFF90
64919+:105D0000AD0C000C9547003230E6FFFFAD060010E9
64920+:105D10009145004830A200FF000219C25060000106
64921+:105D20008D240034AD0400148D4700388FAA00186C
64922+:105D300027BD0008AD0B0028AD0A0024AD07001CEC
64923+:105D4000AD00002CAD00001803E00008AD000020FD
64924+:105D500027BDFFE0AFB20018AFB10014AFB0001024
64925+:105D6000AFBF001C9098003000C088213C0D00FFA0
64926+:105D7000330F007FA0CF0000908E003135ACFFFFC5
64927+:105D80003C0AFF00A0CE000194A6001EA220000441
64928+:105D90008CAB00148E29000400A08021016C282403
64929+:105DA000012A40240080902101052025A62600021A
64930+:105DB000AE24000426050020262400080E000092D0
64931+:105DC00024060002924700302605002826240014ED
64932+:105DD00000071E000003160324060004044000030D
64933+:105DE0002403FFFF965900323323FFFF0E00009279
64934+:105DF000AE230010262400248FBF001C8FB2001891
64935+:105E00008FB100148FB00010240500030000302172
64936+:105E10000A00009C27BD002027BDFFD8AFB1001CA1
64937+:105E2000AFB00018AFBF002090A9003024020001DD
64938+:105E300000E050213123003F00A040218FB00040FE
64939+:105E40000080882100C04821106200148FA700380C
64940+:105E5000240B000500A0202100C02821106B001396
64941+:105E6000020030210E000128000000009225007C75
64942+:105E700030A400021080000326030030AE00003082
64943+:105E8000260300348FBF00208FB1001C8FB0001894
64944+:105E90000060102103E0000827BD00280E0000A7C5
64945+:105EA000AFB000100A00016F000000008FA3003C9B
64946+:105EB000010020210120282101403021AFA3001042
64947+:105EC0000E0000EEAFB000140A00016F00000000E9
64948+:105ED0003C06800034C20E008C4400108F850044C4
64949+:105EE000ACA400208C43001803E00008ACA30024FD
64950+:105EF0003C06800034C20E008C4400148F850044A0
64951+:105F0000ACA400208C43001C03E00008ACA30024D8
64952+:105F10009382000C1040001B2483000F2404FFF028
64953+:105F20000064382410E00019978B00109784000E4D
64954+:105F30009389000D3C0A601C0A0001AC01644023F7
64955+:105F400001037021006428231126000231C2FFFFE3
64956+:105F500030A2FFFF0047302B50C0000E00E4482164
64957+:105F60008D4D000C31A3FFFF00036400000C2C03D7
64958+:105F700004A1FFF30000302130637FFF0A0001A479
64959+:105F80002406000103E00008000000009784000ED2
64960+:105F900000E448213123FFFF3168FFFF0068382B00
64961+:105FA00054E0FFF8A783000E938A000D114000050E
64962+:105FB000240F0001006BC023A380000D03E0000844
64963+:105FC000A798000E006BC023A38F000D03E000080C
64964+:105FD000A798000E03E000080000000027BDFFE8BE
64965+:105FE000AFB000103C10800036030140308BFFFF43
64966+:105FF00093AA002BAFBF0014A46B000436040E005C
64967+:106000009488001630C600FF8FA90030A4680006EF
64968+:10601000AC650008A0660012A46A001AAC670020F4
64969+:106020008FA5002CA4690018012020210E000198E2
64970+:10603000AC6500143C021000AE0201788FBF001462
64971+:106040008FB0001003E0000827BD00188F85000006
64972+:106050002484000727BDFFF83084FFF83C06800049
64973+:1060600094CB008A316AFFFFAFAA00008FA900001D
64974+:10607000012540232507FFFF30E31FFF0064102B9D
64975+:106080001440FFF700056882000D288034CC4000E2
64976+:1060900000AC102103E0000827BD00088F8200003B
64977+:1060A0002486000730C5FFF800A2182130641FFFC6
64978+:1060B00003E00008AF8400008F87003C8F84004419
64979+:1060C00027BDFFB0AFB70044AFB40038AFB1002C6C
64980+:1060D000AFBF0048AFB60040AFB5003CAFB300342F
64981+:1060E000AFB20030AFB000283C0B80008C8600249B
64982+:1060F000AD6700808C8A002035670E00356901008D
64983+:10610000ACEA00108C8800248D2500040000B82122
64984+:10611000ACE800188CE3001000A688230000A02142
64985+:10612000ACE300148CE20018ACE2001C122000FE6C
64986+:1061300000E0B021936C0008118000F40000000022
64987+:10614000976F001031EEFFFF022E682B15A000EFB5
64988+:1061500000000000977200103250FFFFAED0000028
64989+:106160003C0380008C740000329300081260FFFD35
64990+:106170000000000096D800088EC700043305FFFF1A
64991+:1061800030B5000112A000E4000000000000000D86
64992+:1061900030BFA0402419004013F9011B30B4A00007
64993+:1061A000128000DF000000009373000812600008F6
64994+:1061B00000000000976D001031ACFFFF00EC202BB9
64995+:1061C0001080000330AE004011C000D50000000078
64996+:1061D000A7850040AF87003893630008022028217C
64997+:1061E000AFB10020146000F527B40020AF60000CB0
64998+:1061F000978F004031F14000162000022403001662
64999+:106200002403000E24054007A363000AAF650014B1
65000+:10621000938A00428F70001431550001001512401E
65001+:1062200002024825AF690014979F00408F78001440
65002+:1062300033F9001003194025AF680014979200400D
65003+:106240003247000810E0016E000000008F67001464
65004+:106250003C1210003C11800000F27825AF6F001452
65005+:1062600036230E00946E000A3C0D81002406000EB9
65006+:1062700031CCFFFF018D2025AF640004A36600022E
65007+:106280009373000A3406FFFC266B0004A36B000A1C
65008+:1062900097980040330820001100015F00000000C3
65009+:1062A0003C05800034A90E00979900409538000CF9
65010+:1062B00097870040001940423312C00031030003A9
65011+:1062C00000127B0330F11000006F6825001172038B
65012+:1062D00001AE6025000C20C0A76400129793004017
65013+:1062E000936A000A001359823175003C02AA1021FA
65014+:1062F0002450003CA3700009953F000C33F93FFF88
65015+:10630000A779001097700012936900090130F821F5
65016+:1063100027E5000230B900070019C0233308000741
65017+:10632000A368000B9371000997720012976F001019
65018+:10633000322700FF8F910038978D004000F218211E
65019+:10634000006F702101C6602131A6004010C0000519
65020+:106350003185FFFF00B1102B3C1280001040001768
65021+:10636000000098210225A82B56A0013E8FA50020F1
65022+:106370003C048000348A0E008D5300143C068000DB
65023+:10638000AD5300108D4B001CAD4B0018AD45000007
65024+:106390008CCD000031AC00081180FFFD34CE0E0022
65025+:1063A00095C3000800A0882100009021A783004029
65026+:1063B0008DC6000424130001AF860038976F0010CB
65027+:1063C00031F5FFFF8E9F000003F1282310A0011F6D
65028+:1063D000AE85000093620008144000DD000000005C
65029+:1063E0000E0001E7240400108F900048004028218F
65030+:1063F0003C023200320600FF000654000142F8253C
65031+:1064000026090001AF890048ACBF0000937900095C
65032+:1064100097780012936F000A332800FF3303FFFFC1
65033+:106420000103382100076C0031EE00FF01AE60254A
65034+:10643000ACAC00048F840048978B0040316A200088
65035+:106440001140010AACA4000897640012308BFFFFD2
65036+:1064500006400108ACAB000C978E004031C5000827
65037+:1064600014A0000226280006262800023C1F8000F7
65038+:1064700037E70E0094F900148CE5001C8F670004C8
65039+:10648000937800023324FFFF330300FFAFA3001013
65040+:106490008F6F0014AFA800180E0001CBAFAF00142F
65041+:1064A000240400100E0001FB000000008E9200008A
65042+:1064B00016400005000000008F7800142403FFBF81
65043+:1064C0000303A024AF7400148F67000C00F5C821EB
65044+:1064D000AF79000C9375000816A0000800000000BA
65045+:1064E00012600006000000008F6800143C0AEFFFF5
65046+:1064F0003549FFFE0109F824AF7F0014A37300089B
65047+:106500008FA500200A00034F02202021AED10000F9
65048+:106510000A00022D3C03800014E0FF1E30BFA040A3
65049+:106520000E0001900000A0212E9100010237B0253D
65050+:1065300012C000188FBF00488F87003C24170F003F
65051+:1065400010F700D43C0680008CD901780720FFFEAC
65052+:10655000241F0F0010FF00F634CA0E008D560014E1
65053+:1065600034C7014024080240ACF600048D49001CE9
65054+:106570003C141000ACE90008A0E00012A4E0001AEE
65055+:10658000ACE00020A4E00018ACE80014ACD4017822
65056+:106590008FBF00488FB700448FB600408FB5003CD6
65057+:1065A0008FB400388FB300348FB200308FB1002C1D
65058+:1065B0008FB0002803E0000827BD00508F910038FD
65059+:1065C000978800403C1280000220A821310700403B
65060+:1065D00014E0FF7C00009821977900108F9200381A
65061+:1065E0003338FFFF131200A8000020210080A021F3
65062+:1065F000108000F300A088211620FECE00000000CD
65063+:106600000A00031F2E9100013C0380008C62017878
65064+:106610000440FFFE240808008F860000AC68017863
65065+:106620003C038000946D008A31ACFFFF0186582343
65066+:10663000256AFFFF31441FFF2C8900081520FFF950
65067+:10664000000000008F8F0048347040008F83003CB2
65068+:1066500000E0A021240E0F0025E70001AF870048CD
65069+:1066600000D03021023488233C08800031F500FF3F
65070+:10667000106E0005240700019398004233130001B7
65071+:106680000013924036470001001524003C0A010027
65072+:10669000008A4825ACC900008F82004830BF003610
65073+:1066A00030B90008ACC200041320009900FF9825FF
65074+:1066B00035120E009650000A8F8700003C0F8100B3
65075+:1066C0003203FFFF24ED000835060140006F60250E
65076+:1066D0003C0E100031AB1FFF269200062405000E71
65077+:1066E000ACCC0020026E9825A4C5001AAF8B000028
65078+:1066F000A4D20018162000083C1080008F89003CAE
65079+:1067000024020F00512200022417000136730040BA
65080+:106710000E0001883C10800036060E008CCB001461
65081+:10672000360A014002402021AD4B00048CC5001CFC
65082+:10673000AD450008A1550012AD5300140E0001989C
65083+:106740003C151000AE1501780A000352000000004D
65084+:10675000936F0009976E0012936D000B31E500FFF7
65085+:1067600000AE202131AC00FF008C80212602000AFF
65086+:106770003050FFFF0E0001E7020020218F86004805
65087+:106780003C0341003C05800024CB0001AF8B004856
65088+:10679000936A00099769001230C600FF315F00FF5D
65089+:1067A0003128FFFF03E8382124F900020006C40065
65090+:1067B0000319782501E37025AC4E00008F6D000CA5
65091+:1067C00034A40E00948B001401B26025AC4C00047C
65092+:1067D0008C85001C8F670004936A00023164FFFF00
65093+:1067E000314900FFAFA900108F680014AFB1001845
65094+:1067F0000E0001CBAFA800140A0002FD0200202108
65095+:10680000AF600004A36000029798004033082000A6
65096+:106810001500FEA300003021A760001297840040FD
65097+:10682000936B000A3C10800030931F0000135183CB
65098+:10683000014BA82126A20028A362000936090E00F8
65099+:10684000953F000C0A000295A77F00108F7000147E
65100+:10685000360900400E000188AF6900140A0002C921
65101+:10686000000000000A00034F000020210641FEFA4C
65102+:10687000ACA0000C8CAC000C3C0D8000018D902570
65103+:106880000A0002EAACB2000C000090210A0002C526
65104+:1068900024130001128000073C028000344B0E00DC
65105+:1068A0009566000830D300401260004900000000E7
65106+:1068B0003C0680008CD001780600FFFE34C50E0037
65107+:1068C00094B500103C03050034CC014032B8FFFF02
65108+:1068D00003039025AD92000C8CAF0014240D200012
65109+:1068E0003C041000AD8F00048CAE001CAD8E00087F
65110+:1068F000A1800012A580001AAD800020A58000189C
65111+:10690000AD8D0014ACC401780A0003263C0680005B
65112+:106910008F9F0000351801402692000227F90008D9
65113+:1069200033281FFFA71200180A000391AF88000048
65114+:106930003C02800034450140ACA0000C1280001BDA
65115+:1069400034530E0034510E008E370010ACB70004E3
65116+:106950008E2400183C0B8000ACA400083570014068
65117+:1069600024040040A20000128FBF0048A600001AB5
65118+:106970008FB70044AE0000208FB60040A60000187C
65119+:106980008FB5003CAE0400148FB400388FB30034D0
65120+:106990008FB200308FB1002C8FB000283C02100065
65121+:1069A00027BD005003E00008AD6201788E66001438
65122+:1069B000ACA600048E64001C0A00042A3C0B800074
65123+:1069C0000E0001902E9100010A0003200237B0252D
65124+:1069D000000000000000000D00000000240003691A
65125+:1069E0000A0004013C06800027BDFFD8AFBF00208D
65126+:1069F0003C0980003C1F20FFAFB200183C0760003C
65127+:106A000035320E002402001037F9FFFDACE23008E9
65128+:106A1000AFB3001CAFB10014AFB00010AE5900000E
65129+:106A20000000000000000000000000000000000066
65130+:106A3000000000003C1800FF3713FFFDAE530000BC
65131+:106A40003C0B60048D7050002411FF7F3C0E00024F
65132+:106A50000211782435EC380C35CD0109ACED4C1819
65133+:106A6000240A0009AD6C50008CE80438AD2A0008F7
65134+:106A7000AD2000148CE54C1C3106FFFF38C42F718B
65135+:106A800000051E023062000F2486C0B310400007CC
65136+:106A9000AF8200088CE54C1C3C09001F3528FC0027
65137+:106AA00000A81824000321C2AF8400048CF1080858
65138+:106AB0003C0F57092412F0000232702435F0001008
65139+:106AC00001D0602601CF68262DAA00012D8B000180
65140+:106AD000014B382550E00009A380000C3C1F601CCE
65141+:106AE0008FF8000824190001A399000C33137C00CF
65142+:106AF000A7930010A780000EA380000DAF80004870
65143+:106B000014C00003AF8000003C066000ACC0442C01
65144+:106B10000E0005B93C1080000E000F1A361101005E
65145+:106B20003C12080026523DD03C13080026733E500C
65146+:106B30008E03000038640001308200011440FFFC25
65147+:106B40003C0B800A8E2600002407FF8024C90240E7
65148+:106B5000312A007F014B402101272824AE06002066
65149+:106B6000AF880044AE0500243C048000AF86003CA2
65150+:106B70008C8C01780580FFFE24180800922F0008F5
65151+:106B8000AC980178A38F0042938E004231CD000172
65152+:106B900011A0000F24050D0024DFF8002FF90301D8
65153+:106BA0001320001C000629C224A4FFF00004104298
65154+:106BB000000231400E00020200D2D8213C02400007
65155+:106BC0003C068000ACC201380A0004A000000000AE
65156+:106BD00010C50023240D0F0010CD00273C1F800896
65157+:106BE00037F9008093380000240E0050330F00FF67
65158+:106BF00015EEFFF33C0240000E000A3600000000D4
65159+:106C00003C0240003C068000ACC201380A0004A0EF
65160+:106C1000000000008F83000400A3402B1500000B30
65161+:106C20008F8B0008006B50212547FFFF00E5482BA4
65162+:106C30001520000600A36023000C19400E0002027C
65163+:106C40000073D8210A0004C43C0240000000000D7B
65164+:106C50000E000202000000000A0004C43C024000D2
65165+:106C60003C1B0800277B3F500E0002020000000082
65166+:106C70000A0004C43C0240003C1B0800277B3F7014
65167+:106C80000E000202000000000A0004C43C024000A2
65168+:106C90003C0660043C09080025290104ACC9502CBD
65169+:106CA0008CC850003C0580003C0200023507008083
65170+:106CB000ACC750003C040800248415A43C03080021
65171+:106CC0002463155CACA50008ACA2000C3C010800D4
65172+:106CD000AC243D603C010800AC233D6403E00008A7
65173+:106CE0002402000100A030213C1C0800279C3D68C4
65174+:106CF0003C0C04003C0B0002008B3826008C402624
65175+:106D00002CE200010007502B2D050001000A4880ED
65176+:106D10003C03080024633D60004520250123182121
65177+:106D20001080000300001021AC6600002402000166
65178+:106D300003E00008000000003C1C0800279C3D68A0
65179+:106D40003C0B04003C0A0002008A3026008B3826E7
65180+:106D50002CC200010006482B2CE5000100094080F0
65181+:106D60003C03080024633D600045202501031821F1
65182+:106D700010800005000010213C0C0800258C155CDB
65183+:106D8000AC6C00002402000103E0000800000000D9
65184+:106D90003C0900023C08040000883026008938269F
65185+:106DA0002CC30001008028212CE400010083102561
65186+:106DB0001040000B000030213C1C0800279C3D685F
65187+:106DC0003C0A80008D4E00082406000101CA682597
65188+:106DD000AD4D00088D4C000C01855825AD4B000CC5
65189+:106DE00003E0000800C010213C1C0800279C3D68FF
65190+:106DF0003C0580008CA6000C000420272402000122
65191+:106E000000C4182403E00008ACA3000C3C020002FC
65192+:106E10001082000B3C0560003C0704001087000353
65193+:106E20000000000003E00008000000008CA908D06A
65194+:106E3000240AFFFD012A402403E00008ACA808D082
65195+:106E40008CA408D02406FFFE0086182403E0000866
65196+:106E5000ACA308D03C05601A34A600108CC3008097
65197+:106E600027BDFFF88CC50084AFA3000093A40000E9
65198+:106E70002402000110820003AFA5000403E0000813
65199+:106E800027BD000893A7000114E0001497AC00028E
65200+:106E900097B800023C0F8000330EFFFC01CF682141
65201+:106EA000ADA50000A3A000003C0660008CC708D080
65202+:106EB0002408FFFE3C04601A00E82824ACC508D072
65203+:106EC0008FA300048FA200003499001027BD000892
65204+:106ED000AF22008003E00008AF2300843C0B800059
65205+:106EE000318AFFFC014B48218D2800000A00057DF6
65206+:106EF000AFA8000427BDFFE8AFBF00103C1C08008E
65207+:106F0000279C3D683C0580008CA4000C8CA20004EA
65208+:106F10003C0300020044282410A0000A00A3182407
65209+:106F20003C0604003C0400021460000900A6102482
65210+:106F30001440000F3C0404000000000D3C1C08003D
65211+:106F4000279C3D688FBF001003E0000827BD001894
65212+:106F50003C0208008C423D600040F809000000003F
65213+:106F60003C1C0800279C3D680A0005A68FBF001046
65214+:106F70003C0208008C423D640040F809000000001B
65215+:106F80000A0005AC00000000000411C003E0000886
65216+:106F9000244202403C04080024843FB42405001A23
65217+:106FA0000A00009C0000302127BDFFE0AFB00010B8
65218+:106FB0003C108000AFBF0018AFB1001436110100C3
65219+:106FC000922200090E0005B63044007F8E3F00007B
65220+:106FD0008F89003C3C0F008003E26021258800403F
65221+:106FE0000049F821240DFF80310E00783198007897
65222+:106FF00035F9000135F100020319382501D1482582
65223+:10700000010D302403ED5824018D2824240A00406A
65224+:1070100024040080240300C0AE0B0024AE0008103E
65225+:10702000AE0A0814AE040818AE03081CAE05080426
65226+:10703000AE070820AE060808AE0908243609090084
65227+:107040009539000C3605098033ED007F3338FFFF9A
65228+:10705000001889C0AE110800AE0F0828952C000C4E
65229+:107060008FBF00188FB10014318BFFFF000B51C090
65230+:10707000AE0A002C8CA400508FB000108CA3003CF2
65231+:107080008D2700048CA8001C8CA600383C0E800ABA
65232+:1070900001AE102127BD0020AF820044AF84005014
65233+:1070A000AF830054AF87004CAF88005C03E000085A
65234+:1070B000AF8600603C09080091293FD924A800024E
65235+:1070C0003C05110000093C0000E8302500C51825EA
65236+:1070D00024820008AC83000003E00008AC800004B8
65237+:1070E0003C098000352309009128010B906A0011AA
65238+:1070F0002402002800804821314700FF00A07021B1
65239+:1071000000C068213108004010E20002340C86DD26
65240+:10711000240C08003C0A800035420A9A944700007B
65241+:10712000354B0A9C35460AA030F9FFFFAD39000007
65242+:107130008D780000354B0A8024040001AD3800042E
65243+:107140008CCF0000AD2F00089165001930A300031B
65244+:107150001064009028640002148000AF240500022F
65245+:107160001065009E240F0003106F00B435450AA47B
65246+:10717000240A0800118A0048000000005100003D68
65247+:107180003C0B80003C0480003483090090670012AF
65248+:1071900030E200FF004D7821000FC8802724000155
65249+:1071A0003C0A8000354F090091E50019354C0980F3
65250+:1071B0008D87002830A300FF0003150000475825E5
65251+:1071C0000004C4003C19600001793025370806FF2F
65252+:1071D000AD260000AD2800048DEA002C25280028EB
65253+:1071E000AD2A00088DEC0030AD2C000C8DE500348C
65254+:1071F000AD2500108DE400383C05800034AC093C1E
65255+:10720000AD2400148DE3001CAD2300188DE7002091
65256+:10721000AD27001C8DE20024AD2200208DF900284E
65257+:1072200034A20100AD3900248D830000AD0E0004AE
65258+:1072300034B90900AD0300008C47000C250200148E
65259+:10724000AD070008932B00123C04080090843FD83F
65260+:10725000AD000010317800FF030D302100064F0013
65261+:1072600000047C00012F702535CDFFFC03E00008F1
65262+:10727000AD0D000C35780900930600123C0508009E
65263+:1072800094A53FC830C800FF010D5021000A60805E
65264+:107290000A00063C018520211500005B000000006B
65265+:1072A0003C08080095083FCE3C06080094C63FC83D
65266+:1072B000010610213C0B800035790900933800113C
65267+:1072C000932A001935660A80330800FF94CF002AFC
65268+:1072D00000086082314500FF978A0058000C1E00AC
65269+:1072E000000524003047FFFF006410250047C0253B
65270+:1072F00001EA30213C0B4000030B402500066400EE
65271+:10730000AD280000AD2C0004932500183C030006B6
65272+:107310002528001400053E0000E31025AD220008DA
65273+:107320008F24002C3C05800034AC093CAD24000CBB
65274+:107330008F38001C34A20100254F0001AD38001029
65275+:107340008D830000AD0E000431EB7FFFAD03000024
65276+:107350008C47000C34B90900A78B0058AD07000812
65277+:10736000932B00123C04080090843FD8250200149F
65278+:10737000317800FF030D302100064F0000047C002F
65279+:10738000012F702535CDFFFCAD00001003E0000893
65280+:10739000AD0D000C3C02080094423FD23C050800B1
65281+:1073A00094A53FC835440AA43C07080094E73FC4AD
65282+:1073B000948B00000045C8210327C023000B1C004C
65283+:1073C0002706FFF200665025AD2A000CAD20001004
65284+:1073D000AD2C00140A00063025290018354F0AA4E8
65285+:1073E00095E50000956400280005140000043C00A9
65286+:1073F0003459810000EC5825AD39000CAD2B00103C
65287+:107400000A000630252900143C0C0800958C3FCE5C
65288+:107410000A000681258200015460FF56240A0800F4
65289+:1074200035580AA49706000000061C00006C502581
65290+:10743000AD2A000C0A000630252900103C03080084
65291+:1074400094633FD23C07080094E73FC83C0F080014
65292+:1074500095EF3FC494A4000095790028006710219F
65293+:10746000004F582300041C00001934002578FFEE5B
65294+:1074700000D87825346A8100AD2A000CAD2F0010A9
65295+:10748000AD200014AD2C00180A0006302529001C80
65296+:1074900003E00008240207D027BDFFE0AFB20018C8
65297+:1074A000AFB10014AFB00010AFBF001C0E00007CE5
65298+:1074B000008088218F8800548F87004C3C0580080D
65299+:1074C00034B20080011128213C1080002402008089
65300+:1074D000240300C000A72023AE0208183C06800841
65301+:1074E000AE03081C18800004AF850054ACC500042E
65302+:1074F0008CC90004AF89004C1220000936040980B1
65303+:107500000E0006F800000000924C00278E0B00745D
65304+:1075100001825004014B3021AE46000C3604098034
65305+:107520008C8E001C8F8F005C01CF682319A0000493
65306+:107530008FBF001C8C90001CAF90005C8FBF001CA4
65307+:107540008FB200188FB100148FB000100A00007EB7
65308+:1075500027BD00208F8600508F8300548F82004CFF
65309+:107560003C05800834A40080AC860050AC83003C0D
65310+:1075700003E00008ACA200043C0308008C63005444
65311+:1075800027BDFFF8308400FF2462000130A500FF12
65312+:107590003C010800AC22005430C600FF3C078000CC
65313+:1075A0008CE801780500FFFE3C0C7FFFA3A40003DC
65314+:1075B0008FAA0000358BFFFF014B4824000627C02F
65315+:1075C00001244025AFA8000034E201009043000AE6
65316+:1075D000A3A000023C1980FFA3A300018FAF00000D
65317+:1075E00030AE007F3738FFFF01F86024000E6E00D8
65318+:1075F0003C0A002034E50140018D58253549200022
65319+:107600002406FF803C04100027BD0008ACAB000C32
65320+:10761000ACA90014A4A00018A0A6001203E0000862
65321+:10762000ACE40178308800FF30A700FF3C03800005
65322+:107630008C6201780440FFFE3C0C8000358A0A0011
65323+:107640008D4B00203584014035850980AC8B0004CA
65324+:107650008D4900240007302B00061540AC89000836
65325+:10766000A088001090A3004CA083002D03E0000828
65326+:10767000A480001827BDFFE8308400FFAFBF0010D2
65327+:107680000E00075D30A500FF8F8300548FBF0010F0
65328+:107690003C06800034C50140344700402404FF907C
65329+:1076A0003C02100027BD0018ACA3000CA0A40012DF
65330+:1076B000ACA7001403E00008ACC2017827BDFFE0CE
65331+:1076C0003C088008AFBF001CAFB20018AFB1001477
65332+:1076D000AFB00010351000808E0600183C07800007
65333+:1076E000309200FF00C72025AE0400180E00007C79
65334+:1076F00030B100FF92030005346200080E00007EE6
65335+:10770000A2020005024020210E000771022028215C
65336+:10771000024020218FBF001C8FB200188FB10014CF
65337+:107720008FB0001024050005240600010A0007326E
65338+:1077300027BD00203C05800034A309809066000826
65339+:1077400030C200081040000F3C0A01013549080A08
65340+:10775000AC8900008CA80074AC8800043C070800C9
65341+:1077600090E73FD830E5001050A00008AC8000083A
65342+:107770003C0D800835AC00808D8B0058AC8B000828
65343+:107780002484000C03E00008008010210A0007B5E3
65344+:107790002484000C27BDFFE83C098000AFB0001036
65345+:1077A000AFBF00143526098090C8000924020006E6
65346+:1077B00000A05821310300FF3527090000808021F7
65347+:1077C000240500041062007B2408000294CF005CB2
65348+:1077D0003C0E020431EDFFFF01AE6025AE0C00004F
65349+:1077E00090CA00083144002010800008000000000A
65350+:1077F00090C2004E3C1F010337F90300305800FFD0
65351+:107800000319302524050008AE06000490F9001184
65352+:1078100090E6001290E40011333800FF00187082E7
65353+:1078200030CF00FF01CF5021014B6821308900FF8C
65354+:1078300031AAFFFF39230028000A60801460002C61
65355+:10784000020C482390E400123C198000372F0100FD
65356+:10785000308C00FF018B1821000310800045F821B7
65357+:10786000001F8400360706FFAD270004373F0900DC
65358+:1078700093EC001193EE0012372609800005C082B8
65359+:107880008DE4000C8CC5003431CD00FF01AB10211C
65360+:107890000058182100A4F8230008840000033F00CA
65361+:1078A00000F0302533F9FFFF318F00FC00D970253F
65362+:1078B0000158202101E9682100045080ADAE000C80
65363+:1078C0000E00007C012A80213C088008240B000463
65364+:1078D000350500800E00007EA0AB000902001021DB
65365+:1078E0008FBF00148FB0001003E0000827BD001800
65366+:1078F00090EC001190E300193C18080097183FCE57
65367+:10790000318200FF0002F882307000FF001FCE00BD
65368+:1079100000103C000327302500D870253C0F4000A4
65369+:1079200001CF68253C198000AD2D0000373F0900CC
65370+:1079300093EC001193EE0012372F010037260980D7
65371+:107940000005C0828DE4000C8CC5003431CD00FFF1
65372+:1079500001AB10210058182100A4F823000884006E
65373+:1079600000033F0000F0302533F9FFFF318F00FCAA
65374+:1079700000D970250158202101E9682100045080B8
65375+:10798000ADAE000C0E00007C012A80213C0880086E
65376+:10799000240B0004350500800E00007EA0AB00091A
65377+:1079A000020010218FBF00148FB0001003E0000808
65378+:1079B00027BD00180A0007C72408001227BDFFD002
65379+:1079C0003C038000AFB60028AFB50024AFB4002060
65380+:1079D000AFB10014AFBF002CAFB3001CAFB20018A2
65381+:1079E000AFB000103467010090E6000B309400FF48
65382+:1079F00030B500FF30C200300000B02110400099C7
65383+:107A000000008821346409809088000800082E0056
65384+:107A100000051E03046000C0240400048F86005487
65385+:107A20003C010800A0243FD83C0C8000AD800048F9
65386+:107A30003C048000348E010091CD000B31A5002064
65387+:107A400010A000073C078000349309809272000860
65388+:107A50000012860000107E0305E000C43C1F800871
65389+:107A600034EC0100918A000B34EB09809169000825
65390+:107A7000314400400004402B3123000800C8982303
65391+:107A80001460000224120003000090213C108000CA
65392+:107A900036180A8036040900970E002C90830011D6
65393+:107AA0009089001293050018307F00FF312800FFF5
65394+:107AB000024810210002C880930D0018033F78216E
65395+:107AC00001F1302130B100FF00D11821A78E0058FC
65396+:107AD0003C010800A4263FCE3C010800A4233FD06F
65397+:107AE00015A00002000000000000000D920B010B29
65398+:107AF0003065FFFF3C010800A4233FD2316A0040FB
65399+:107B00003C010800A4203FC83C010800A4203FC459
65400+:107B10001140000224A4000A24A4000B3091FFFFAE
65401+:107B20000E0001E7022020219206010B3C0C080008
65402+:107B3000958C3FD2004020210006698231A70001C8
65403+:107B40000E00060101872821004020210260282123
65404+:107B50000E00060C024030210E0007A1004020213B
65405+:107B600016C00069004020219212010B32560040DD
65406+:107B700012C000053C0500FF8C93000034AEFFFFEF
65407+:107B8000026E8024AC9000000E0001FB0220202138
65408+:107B90003C0F080091EF3FD831F10003122000168E
65409+:107BA0003C1380088F8200543C09800835280080EF
65410+:107BB000245F0001AD1F003C3C0580088CB9000427
65411+:107BC00003E02021033FC0231B000002AF9F0054AD
65412+:107BD0008CA400040E0006F8ACA400043C0780004E
65413+:107BE0008CEB00743C04800834830080004B5021EF
65414+:107BF000AC6A000C3C1380083670008002802021A3
65415+:107C000002A02821A200006B0E00075D3C1480003A
65416+:107C10008F920054368C0140AD92000C8F86004844
65417+:107C20003C151000344D000624D60001AF960048E4
65418+:107C30008FBF002CA18600128FB60028AD8D0014D6
65419+:107C40008FB3001CAE9501788FB200188FB5002459
65420+:107C50008FB400208FB100148FB0001003E0000833
65421+:107C600027BD003034640980908F0008000F760033
65422+:107C7000000E6E0305A00033347F090093F8001B4B
65423+:107C8000241900103C010800A0393FD8331300022A
65424+:107C90001260FF678F8600548F8200601446FF6574
65425+:107CA0003C0480000E00007C000000003C048008C2
65426+:107CB0003485008090A8000924060016310300FFD7
65427+:107CC0001066000D0000000090AB00093C070800A2
65428+:107CD00090E73FD824090008316400FF34EA00012E
65429+:107CE0003C010800A02A3FD81089002F240C000A6C
65430+:107CF000108C00282402000C0E00007E0000000002
65431+:107D00000A0008608F8600540E0007B9024028213F
65432+:107D10000A0008AE004020213C0B8008356A008034
65433+:107D20008D4600548CE9000C1120FF3DAF860054B5
65434+:107D3000240700143C010800A0273FD80A00085F70
65435+:107D40003C0C800090910008241200023C010800C5
65436+:107D5000A0323FD8323000201200000B2416000160
65437+:107D60008F8600540A0008602411000837F800804C
65438+:107D70008F020038AFE200048FF90004AF19003C15
65439+:107D80000A00086C3C0780008F8600540A000860D7
65440+:107D900024110004A0A200090E00007E00000000D3
65441+:107DA0000A0008608F860054240200140A00093A71
65442+:107DB000A0A2000927BDFFE8AFB000103C10800072
65443+:107DC000AFBF001436020100904400090E00075DA9
65444+:107DD000240500013C0480089099000E3483008043
65445+:107DE000909F000F906F00269089000A33F800FFE3
65446+:107DF00000196E000018740031EC00FF01AE502530
65447+:107E0000000C5A00014B3825312800FF3603014091
65448+:107E10003445600000E830252402FF813C04100056
65449+:107E2000AC66000C8FBF0014AC650014A062001299
65450+:107E3000AE0401788FB0001003E0000827BD0018E1
65451+:107E400027BDFFE8308400FFAFBF00100E00075DC4
65452+:107E500030A500FF3C05800034A4014034470040B9
65453+:107E60002406FF92AC870014A08600128F83005472
65454+:107E70008FBF00103C02100027BD0018AC83000C1F
65455+:107E800003E00008ACA2017827BDFFD8AFB0001016
65456+:107E9000308400FF30B000FF3C058000AFB100141B
65457+:107EA000AFBF0020AFB3001CAFB20018000410C277
65458+:107EB00034A60100320300023051000114600007B3
65459+:107EC00090D200093C098008353300809268000593
65460+:107ED0003107000810E0000C308A00100240202119
65461+:107EE0000E00078302202821240200018FBF0020FA
65462+:107EF0008FB3001C8FB200188FB100148FB0001028
65463+:107F000003E0000827BD00281540003434A50A000E
65464+:107F10008CB800248CAF0008130F004B00003821F0
65465+:107F20003C0D800835B30080926C00682406000286
65466+:107F3000318B00FF116600843C06800034C20100D2
65467+:107F40009263004C90590009307F00FF53F9000400
65468+:107F50003213007C10E00069000000003213007C46
65469+:107F60005660005C0240202116200009320D0001FD
65470+:107F70003C0C800035840100358B0A008D6500249F
65471+:107F80008C86000414A6FFD900001021320D0001D8
65472+:107F900011A0000E024020213C1880003710010083
65473+:107FA0008E0F000C8F8E005011EE000800000000B4
65474+:107FB0000E000843022028218E19000C3C1F800867
65475+:107FC00037F00080AE190050024020210E000771EA
65476+:107FD000022028210A00098F240200013C05080024
65477+:107FE0008CA5006424A400013C010800AC240064BA
65478+:107FF0001600000D00000000022028210E0007716D
65479+:1080000002402021926E0068240C000231CD00FF56
65480+:1080100011AC0022024020210E00094100000000A6
65481+:108020000A00098F240200010E00007024040001E0
65482+:10803000926B0025020B30250E00007EA266002503
65483+:108040000A0009D3022028218E6200188CDF000468
65484+:108050008CB9002400021E0217F9FFB13065007FC1
65485+:108060009268004C264400013093007F1265004066
65486+:10807000310300FF1464FFAB3C0D8008264700016C
65487+:1080800030F1007F30E200FF1225000B24070001D1
65488+:10809000004090210A00099C2411000124050004DD
65489+:1080A0000E000732240600010E0009410000000006
65490+:1080B0000A00098F240200012405FF8002452024C4
65491+:1080C00000859026324200FF004090210A00099C62
65492+:1080D000241100010E00084302202821320700303D
65493+:1080E00010E0FFA132100082024020210E00078321
65494+:1080F000022028210A00098F240200018E6900183D
65495+:108100000240202102202821012640250E0009647A
65496+:10811000AE6800189264004C240500032406000198
65497+:108120000E000732308400FF0E00007024040001AE
65498+:1081300092710025021150250E00007EA26A0025D2
65499+:108140000A00098F240200018E6F00183C1880007D
65500+:108150000240202101F87025022028210E0007711D
65501+:10816000AE6E00189264004C0A000A1B240500043D
65502+:10817000324A0080394900801469FF6A3C0D80084A
65503+:108180000A0009F42647000127BDFFC0AFB0001860
65504+:108190003C108000AFBF0038AFB70034AFB600303E
65505+:1081A000AFB5002CAFB40028AFB30024AFB20020AD
65506+:1081B0000E0005BEAFB1001C360201009045000B59
65507+:1081C0000E00097690440008144000E78FBF003885
65508+:1081D0003C08800835070080A0E0006B3606098067
65509+:1081E00090C50000240300503C17080026F73F907C
65510+:1081F00030A400FF3C13080026733FA01083000347
65511+:108200003C1080000000B82100009821241F0010BD
65512+:108210003611010036120A00361509808E580024E6
65513+:108220008E3400048EAF00208F8C00543C01080077
65514+:10823000A03F3FD836190A80972B002C8EF60000FD
65515+:10824000932A00180298702301EC68233C0108006F
65516+:10825000AC2E3FB43C010800AC2D3FB83C010800F7
65517+:10826000AC2C3FDCA78B005802C0F809315400FF4A
65518+:1082700030490002152000E930420001504000C49E
65519+:108280009227000992A90008312800081500000271
65520+:10829000241500030000A8213C0A80003543090092
65521+:1082A00035440A008C8D00249072001190700012E9
65522+:1082B000907F0011325900FF321100FF02B11021EE
65523+:1082C0000002C08033EF00FF0319B021028F70213C
65524+:1082D00002D4602125CB00103C010800A4363FCE1B
65525+:1082E0003C010800AC2D3FE03C010800A42C3FD02D
65526+:1082F0003C010800A42B3FCC3556010035540980C1
65527+:1083000035510E008F8700548F89005C8E850020C8
65528+:1083100024080006012730233C010800AC283FD484
65529+:1083200000A7282304C000B50000902104A000B3DA
65530+:1083300000C5502B114000B5000000003C010800B2
65531+:10834000AC263FB88E6200000040F8090000000033
65532+:108350003046000214C0007400408021304B000100
65533+:10836000556000118E6200043C0D08008DAD3FBCCD
65534+:108370003C0EC0003C04800001AE6025AE2C000025
65535+:108380008C980000330F000811E0FFFD0000000092
65536+:10839000963F000824120001A79F00408E39000478
65537+:1083A000AF9900388E6200040040F8090000000018
65538+:1083B0000202802532030002146000B300000000B6
65539+:1083C0003C09080095293FC43C06080094C63FD0EC
65540+:1083D0003C0A0800954A3FC63C0708008CE73FBCB2
65541+:1083E000012670213C0308008C633FE03C08080034
65542+:1083F00095083FDA01CA20218ED9000C00E9282116
65543+:10840000249F000200A878210067C02133E4FFFF09
65544+:10841000AF9900503C010800AC383FE03C01080037
65545+:10842000A42F3FC83C010800A42E3FD20E0001E754
65546+:10843000000000008F8D0048004020213C01080012
65547+:10844000A02D3FD98E62000825AC0001AF8C0048FA
65548+:108450000040F809000000008F85005402A0302180
65549+:108460000E00060C004020210E0007A10040202134
65550+:108470008E6B000C0160F809004020213C0A0800C6
65551+:10848000954A3FD23C06080094C63FC601464821A3
65552+:10849000252800020E0001FB3104FFFF3C05080007
65553+:1084A0008CA53FB43C0708008CE73FBC00A7202305
65554+:1084B0003C010800AC243FB414800006000000001A
65555+:1084C0003C0208008C423FD4344B00403C01080081
65556+:1084D000AC2B3FD4124000438F8E00448E2D0010F1
65557+:1084E0008F920044AE4D00208E2C0018AE4C00241C
65558+:1084F0003C04080094843FC80E0006FA0000000007
65559+:108500008F9F00548E6700103C010800AC3F3FDC99
65560+:1085100000E0F809000000003C1908008F393FB462
65561+:108520001720FF798F870054979300583C11800ED5
65562+:10853000321601000E000729A633002C16C0004594
65563+:10854000320300105460004C8EE5000432080040F5
65564+:108550005500001D8EF000088EE4000C0080F80924
65565+:10856000000000008FBF00388FB700348FB6003096
65566+:108570008FB5002C8FB400288FB300248FB2002059
65567+:108580008FB1001C8FB0001803E0000827BD004029
65568+:108590008F86003C36110E0000072E0000A6202515
65569+:1085A000AE0400808E4300208E500024AFA3001044
65570+:1085B000AE2300148FB20010AE320010AE30001C9B
65571+:1085C0000A000A75AE3000180200F8090000000029
65572+:1085D0008EE4000C0080F809000000000A000B2E59
65573+:1085E0008FBF003824180001240F0001A5C000200F
65574+:1085F000A5D800220A000B10ADCF00243C010800D2
65575+:10860000AC203FB80A000AA68E6200003C010800B8
65576+:10861000AC253FB80A000AA68E6200009224000929
65577+:108620000E000771000028218FBF00388FB700347B
65578+:108630008FB600308FB5002C8FB400288FB3002484
65579+:108640008FB200208FB1001C8FB0001803E000082B
65580+:1086500027BD00403C1480009295010900002821AC
65581+:108660000E00084332A400FF320300105060FFB830
65582+:10867000320800408EE5000400A0F8090000000068
65583+:108680000A000B28320800405240FFA89793005878
65584+:108690008E3400148F930044AE7400208E35001C7D
65585+:1086A000AE7500240A000B1F979300588F820014A8
65586+:1086B0000004218003E00008008210213C078008AC
65587+:1086C00034E200809043006900804021106000097E
65588+:1086D0003C0401003C0708008CE73FDC8F8300303E
65589+:1086E00000E32023048000089389001C14E30003A6
65590+:1086F0000100202103E00008008010213C0401005B
65591+:1087000003E00008008010211120000B00673823CF
65592+:108710003C0D800035AC0980918B007C316A0002F1
65593+:10872000114000202409003400E9702B15C0FFF12E
65594+:108730000100202100E938232403FFFC00A3C82402
65595+:1087400000E3C02400F9782B15E0FFEA030820219C
65596+:1087500030C400030004102314C000143049000387
65597+:108760000000302100A9782101E6702100EE682B7D
65598+:1087700011A0FFE03C0401002D3800010006C82BC9
65599+:10878000010548210319382414E0FFDA2524FFFCF1
65600+:108790002402FFFC00A218240068202103E0000846
65601+:1087A000008010210A000B9E240900303C0C800040
65602+:1087B0003586098090CB007C316A00041540FFE9C2
65603+:1087C000240600040A000BAD000030213C03080021
65604+:1087D0008C63005C8F82001827BDFFE0AFBF0018DC
65605+:1087E000AFB1001410620005AFB00010000329C043
65606+:1087F00024A40280AF840014AF8300183C108000D2
65607+:1088000036020A0094450032361101000E000B7F3B
65608+:1088100030A43FFF8E240000241FFF803C11008005
65609+:108820000082C021031F60243309007F000CC9406F
65610+:1088300003294025330E0078362F00033C0D10002D
65611+:10884000010D502501CF5825AE0C002836080980AF
65612+:10885000AE0C080CAE0B082CAE0A08309103006970
65613+:108860003C06800C0126382110600006AF870034DA
65614+:108870008D09003C8D03006C0123382318E0008231
65615+:10888000000000003C0B8008356A00803C1080002E
65616+:10889000A1400069360609808CC200383C06800081
65617+:1088A00034C50A0090A8003C310C00201180001A49
65618+:1088B000AF820030240D00013C0E800035D10A004B
65619+:1088C000A38D001CAF8000248E2400248F850024FB
65620+:1088D000240D0008AF800020AF8000283C01080074
65621+:1088E000A42D3FC63C010800A4203FDA0E000B83F4
65622+:1088F000000030219228003C8FBF00188FB1001477
65623+:108900008FB0001000086142AF82002C27BD00200C
65624+:1089100003E000083182000190B80032240E00010B
65625+:10892000330F00FF000F2182108E00412419000236
65626+:108930001099006434C40AC03C03800034640A0007
65627+:108940008C8F002415E0001E34660900909F0030D3
65628+:108950002418000533F9003F1338004E24030001AA
65629+:108960008F860020A383001CAF860028AF860024DA
65630+:108970003C0E800035D10A008E2400248F8500240F
65631+:10898000240D00083C010800A42D3FC63C0108004E
65632+:10899000A4203FDA0E000B83000000009228003C68
65633+:1089A0008FBF00188FB100148FB000100008614213
65634+:1089B000AF82002C27BD002003E0000831820001B7
65635+:1089C0008C8A00088C8B00248CD000643C0E8000C4
65636+:1089D00035D10A00014B2823AF900024A380001C4E
65637+:1089E000AF8500288E2400248F8600208F850024E8
65638+:1089F000240D00083C010800A42D3FC63C010800DE
65639+:108A0000A4203FDA0E000B83000000009228003CF7
65640+:108A10008FBF00188FB100148FB0001000086142A2
65641+:108A2000AF82002C27BD002003E000083182000146
65642+:108A300090A200303051003F5224002834C50AC0B3
65643+:108A40008CB000241600002234CB09008CA600480C
65644+:108A50003C0A7FFF3545FFFF00C510243C0E800017
65645+:108A6000AF82002035C509008F8800208CAD0060E2
65646+:108A7000010D602B15800002010020218CA40060F4
65647+:108A80000A000C22AF8400208D02006C0A000BFC4F
65648+:108A90003C0680008C8200488F8600203C097FFFC6
65649+:108AA0003527FFFF004788243C0480082403000189
65650+:108AB000AF910028AC80006CA383001C0A000C302E
65651+:108AC000AF8600248C9F00140A000C22AF9F002068
65652+:108AD0008D6200680A000C6C3C0E800034C4098072
65653+:108AE0008C8900708CA300140123382B10E0000443
65654+:108AF000000000008C8200700A000C6C3C0E8000AC
65655+:108B00008CA200140A000C6C3C0E80008F8500249F
65656+:108B100027BDFFE0AFBF0018AFB1001414A00008DC
65657+:108B2000AFB000103C04800034870A0090E60030AB
65658+:108B30002402000530C3003F106200B934840900EC
65659+:108B40008F91002000A080213C048000348E0A0018
65660+:108B50008DCD00043C0608008CC63FB831A73FFF0E
65661+:108B600000E6602B5580000100E03021938F001C4F
65662+:108B700011E0007800D0282B349F098093F9007C05
65663+:108B800033380002130000792403003400C3102B93
65664+:108B9000144000D90000000000C3302300D0282B6F
65665+:108BA0003C010800A4233FC414A0006E0200182159
65666+:108BB0003C0408008C843FB40064402B5500000145
65667+:108BC000006020213C05800034A90A00912A003C65
65668+:108BD0003C010800AC243FBC31430020146000037A
65669+:108BE0000000482134AB0E008D6900188F88002CDE
65670+:108BF0000128202B1080005F000000003C050800C9
65671+:108C00008CA53FBC00A96821010D602B1180005C80
65672+:108C100000B0702B0109382300E028213C01080036
65673+:108C2000AC273FBC12000003240AFFFC10B0008DEB
65674+:108C30003224000300AA18243C010800A4203FDAD3
65675+:108C40003C010800AC233FBC006028218F84002435
65676+:108C5000120400063C0B80088D6C006C0200202181
65677+:108C6000AF91002025900001AD70006C8F8D002821
65678+:108C700000858823AF91002401A52023AF8400281C
65679+:108C80001220000224070018240700103C18800856
65680+:108C90003706008090CF00683C010800A0273FD82D
65681+:108CA0002407000131EE00FF11C70047000000005B
65682+:108CB00014800018000028213C06800034D109806F
65683+:108CC00034CD010091A600098E2C001824C40001A7
65684+:108CD000000C86023205007F308B007F1165007F1B
65685+:108CE0002407FF803C19800837290080A124004C0C
65686+:108CF0003C0808008D083FD4241800023C010800FD
65687+:108D0000A0384019350F00083C010800AC2F3FD4B3
65688+:108D1000240500103C02800034440A009083003C8B
65689+:108D2000307F002013E0000500A02021240A00016C
65690+:108D30003C010800AC2A3FBC34A400018FBF0018DE
65691+:108D40008FB100148FB000100080102103E00008E4
65692+:108D500027BD00203C010800A4203FC410A0FF94C0
65693+:108D6000020018210A000CC000C018210A000CB72C
65694+:108D7000240300303C0508008CA53FBC00B0702BDC
65695+:108D800011C0FFA8000000003C19080097393FC43B
65696+:108D90000325C0210307782B11E000072CAA00044B
65697+:108DA0003C0360008C625404305F003F17E0FFE337
65698+:108DB000240400422CAA00041140FF9A240400421B
65699+:108DC0000A000D248FBF00181528FFB9000000000D
65700+:108DD0008CCA00183C1F800024020002015F182585
65701+:108DE000ACC3001837F90A00A0C200689329003C00
65702+:108DF0002404000400A01021312800203C010800B8
65703+:108E0000A0244019110000022405001024020001D2
65704+:108E10003C010800AC223FB40A000D1A3C0280005D
65705+:108E20008F8800288C8900600109282B14A000027B
65706+:108E3000010088218C9100603C048000348B0E007E
65707+:108E40008D640018240A000102202821022030210C
65708+:108E5000A38A001C0E000B83022080210A000CA6AE
65709+:108E6000AF82002C00045823122000073164000355
65710+:108E70003C0E800035C7098090ED007C31AC0004C9
65711+:108E800015800019248F00043C010800A4243FDA57
65712+:108E90003C1F080097FF3FDA03E5C82100D9C02B2B
65713+:108EA0001300FF6B8F8400242CA6000514C0FFA3C1
65714+:108EB0002404004230A200031440000200A2182340
65715+:108EC00024A3FFFC3C010800AC233FBC3C0108008C
65716+:108ED000A4203FDA0A000CE70060282100C77024B4
65717+:108EE0000A000D0D01C720263C010800A42F3FDA1F
65718+:108EF0000A000D78000000003C010800AC203FBCD7
65719+:108F00000A000D23240400428F8300283C058000C2
65720+:108F100034AA0A00146000060000102191470030B6
65721+:108F20002406000530E400FF108600030000000066
65722+:108F300003E0000800000000914B0048316900FF89
65723+:108F4000000941C21500FFFA3C0680083C040800F5
65724+:108F500094843FC43C0308008C633FDC3C19080048
65725+:108F60008F393FBC3C0F080095EF3FDA0064C02109
65726+:108F70008CCD00040319702101CF602134AB0E00A9
65727+:108F8000018D282318A0001D00000000914F004C07
65728+:108F90008F8C0034956D001031EE00FF8D89000438
65729+:108FA00001AE30238D8A000030CEFFFF000E290075
65730+:108FB0000125C82100003821014720210325182B55
65731+:108FC0000083C021AD990004AD980000918F000A84
65732+:108FD00001CF6821A18D000A956500128F8A0034A7
65733+:108FE000A5450008954B003825690001A5490038C2
65734+:108FF0009148000D35070008A147000D03E0000867
65735+:109000000000000027BDFFD8AFB000189388001CF7
65736+:109010008FB000143C0A80003C197FFF8F8700242A
65737+:109020003738FFFFAFBF0020AFB1001C355F0A002B
65738+:109030000218182493EB003C00087FC03C02BFFFDD
65739+:10904000006F60252CF000013449FFFF3C1F080031
65740+:109050008FFF3FDC8F9900303C18080097183FD2F3
65741+:1090600001897824001047803C07EFFF3C05F0FFA2
65742+:1090700001E818253C1180003169002034E2FFFF2F
65743+:1090800034ADFFFF362E098027A50010240600020C
65744+:1090900003F96023270B0002354A0E0000621824F2
65745+:1090A0000080802115200002000040218D48001C16
65746+:1090B000A7AB0012058000392407000030E800FF4C
65747+:1090C00000083F00006758253C028008AFAB001441
65748+:1090D000344F008091EA00683C08080091083FD9AD
65749+:1090E0003C09DFFF352CFFFF000AF82B3C0208008B
65750+:1090F00094423FCCA3A80011016CC024001FCF40B4
65751+:10910000031918258FA70010AFA300143C0C08000A
65752+:10911000918C3FDBA7A200168FAB001400ED482412
65753+:109120003C0F01003C0A0FFF012FC82531980003B6
65754+:10913000355FFFFF016D40243C027000033F38247F
65755+:1091400000181E0000E2482501037825AFAF001487
65756+:10915000AFA9001091CC007C0E000092A3AC0015CA
65757+:10916000362D0A0091A6003C30C400201080000675
65758+:10917000260200083C11080096313FC8262EFFFF4A
65759+:109180003C010800A42E3FC88FBF00208FB1001CF7
65760+:109190008FB0001803E0000827BD00288F8B002C3B
65761+:1091A000010B502B5540FFC5240700010A000E0497
65762+:1091B00030E800FF9383001C3C02800027BDFFD8ED
65763+:1091C00034480A0000805021AFBF002034460AC056
65764+:1091D000010028211060000E3444098091070030FE
65765+:1091E000240B00058F89002030EC003F118B000B11
65766+:1091F00000003821AFA900103C0B80088D69006C7D
65767+:10920000AFAA00180E00015AAFA90014A380001CD9
65768+:109210008FBF002003E0000827BD00288D1F0048F5
65769+:109220003C1808008F183FBC8F9900283C027FFF34
65770+:109230008D0800443443FFFFAFA900103C0B8008A9
65771+:109240008D69006C03E370240319782101CF682332
65772+:1092500001A83821AFAA00180E00015AAFA90014C6
65773+:109260000A000E58A380001C3C05800034A60A00AA
65774+:1092700090C7003C3C06080094C63FDA3C02080058
65775+:109280008C423FD430E30020000624001060001E12
65776+:10929000004438253C0880083505008090A300680C
65777+:1092A00000004821240800010000282124040001B6
65778+:1092B0003C0680008CCD017805A0FFFE34CF014034
65779+:1092C000ADE800083C0208008C423FDCA5E5000444
65780+:1092D000A5E40006ADE2000C3C04080090843FD9F0
65781+:1092E0003C03800834790080A1E40012ADE700144B
65782+:1092F000A5E900189338004C3C0E1000A1F8002D91
65783+:1093000003E00008ACCE017834A90E008D28001CC3
65784+:109310003C0C08008D8C3FBC952B0016952A001440
65785+:10932000018648213164FFFF0A000E803145FFFFAE
65786+:109330003C04800034830A009065003C30A2002089
65787+:109340001040001934870E00000040210000382131
65788+:10935000000020213C0680008CC901780520FFFE1A
65789+:1093600034CA014034CF010091EB0009AD48000838
65790+:109370003C0E08008DCE3FDC240DFF91240C0040F4
65791+:109380003C081000A5440004A5470006AD4E000CA3
65792+:10939000A14D0012AD4C0014A5400018A14B002DAA
65793+:1093A00003E00008ACC801788CE8001894E60012CD
65794+:1093B00094E4001030C7FFFF0A000EA93084FFFFBD
65795+:1093C0003C04800034830A009065003C30A20020F9
65796+:1093D0001040002727BDFFF82409000100003821B4
65797+:1093E000240800013C0680008CCA01780540FFFE7D
65798+:1093F0003C0280FF34C40100908D00093C0C080041
65799+:10940000918C4019A3AD00038FAB00003185007F24
65800+:109410003459FFFF01665025AFAA00009083000A6F
65801+:10942000A3A0000200057E00A3A300018FB80000E6
65802+:1094300034CB0140240C30000319702401CF68257F
65803+:10944000AD6D000C27BD0008AD6C0014A5600018C0
65804+:10945000AD690008A56700042409FF80A56800061F
65805+:109460003C081000A169001203E00008ACC80178B4
65806+:1094700034870E008CE9001894E6001294E4001082
65807+:1094800030C8FFFF0A000ECD3087FFFF27BDFFE089
65808+:10949000AFB100143C118000AFB00010AFBF001896
65809+:1094A00036380A00970F0032363001000E000B7F6D
65810+:1094B00031E43FFF8E0E0000240DFF803C042000AD
65811+:1094C00001C25821016D6024000C4940316A007FBF
65812+:1094D000012A4025010438253C048008AE270830C5
65813+:1094E0003486008090C500682403000230A200FF8B
65814+:1094F000104300048F9F00208F990024AC9F0068C8
65815+:10950000AC9900648FBF00188FB100148FB00010A9
65816+:1095100003E0000827BD00203C0A0800254A3A80E5
65817+:109520003C09080025293B103C08080025082F1C91
65818+:109530003C07080024E73BDC3C06080024C639044D
65819+:109540003C05080024A536583C0408002484325CFD
65820+:109550003C030800246339B83C0208002442375415
65821+:109560003C010800AC2A3F983C010800AC293F941C
65822+:109570003C010800AC283F903C010800AC273F9C10
65823+:109580003C010800AC263FAC3C010800AC253FA4E0
65824+:109590003C010800AC243FA03C010800AC233FB0D4
65825+:1095A0003C010800AC223FA803E0000800000000D6
65826+:1095B00080000940800009008008010080080080C8
65827+:1095C00080080000800E00008008008080080000F5
65828+:1095D00080000A8080000A00800009808000090065
65829+:00000001FF
65830diff --git a/fs/9p/vfs_addr.c b/fs/9p/vfs_addr.c
65831index eb14e05..5156de7 100644
65832--- a/fs/9p/vfs_addr.c
65833+++ b/fs/9p/vfs_addr.c
65834@@ -187,7 +187,7 @@ static int v9fs_vfs_writepage_locked(struct page *page)
65835
65836 retval = v9fs_file_write_internal(inode,
65837 v9inode->writeback_fid,
65838- (__force const char __user *)buffer,
65839+ (const char __force_user *)buffer,
65840 len, &offset, 0);
65841 if (retval > 0)
65842 retval = 0;
65843diff --git a/fs/9p/vfs_inode.c b/fs/9p/vfs_inode.c
65844index 3662f1d..90558b5 100644
65845--- a/fs/9p/vfs_inode.c
65846+++ b/fs/9p/vfs_inode.c
65847@@ -1312,7 +1312,7 @@ static void *v9fs_vfs_follow_link(struct dentry *dentry, struct nameidata *nd)
65848 void
65849 v9fs_vfs_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
65850 {
65851- char *s = nd_get_link(nd);
65852+ const char *s = nd_get_link(nd);
65853
65854 p9_debug(P9_DEBUG_VFS, " %pd %s\n",
65855 dentry, IS_ERR(s) ? "<error>" : s);
65856diff --git a/fs/Kconfig.binfmt b/fs/Kconfig.binfmt
65857index 270c481..0d8a962 100644
65858--- a/fs/Kconfig.binfmt
65859+++ b/fs/Kconfig.binfmt
65860@@ -106,7 +106,7 @@ config HAVE_AOUT
65861
65862 config BINFMT_AOUT
65863 tristate "Kernel support for a.out and ECOFF binaries"
65864- depends on HAVE_AOUT
65865+ depends on HAVE_AOUT && BROKEN
65866 ---help---
65867 A.out (Assembler.OUTput) is a set of formats for libraries and
65868 executables used in the earliest versions of UNIX. Linux used
65869diff --git a/fs/afs/inode.c b/fs/afs/inode.c
65870index 8a1d38e..300a14e 100644
65871--- a/fs/afs/inode.c
65872+++ b/fs/afs/inode.c
65873@@ -141,7 +141,7 @@ struct inode *afs_iget_autocell(struct inode *dir, const char *dev_name,
65874 struct afs_vnode *vnode;
65875 struct super_block *sb;
65876 struct inode *inode;
65877- static atomic_t afs_autocell_ino;
65878+ static atomic_unchecked_t afs_autocell_ino;
65879
65880 _enter("{%x:%u},%*.*s,",
65881 AFS_FS_I(dir)->fid.vid, AFS_FS_I(dir)->fid.vnode,
65882@@ -154,7 +154,7 @@ struct inode *afs_iget_autocell(struct inode *dir, const char *dev_name,
65883 data.fid.unique = 0;
65884 data.fid.vnode = 0;
65885
65886- inode = iget5_locked(sb, atomic_inc_return(&afs_autocell_ino),
65887+ inode = iget5_locked(sb, atomic_inc_return_unchecked(&afs_autocell_ino),
65888 afs_iget5_autocell_test, afs_iget5_set,
65889 &data);
65890 if (!inode) {
65891diff --git a/fs/aio.c b/fs/aio.c
65892index a793f70..46f45af 100644
65893--- a/fs/aio.c
65894+++ b/fs/aio.c
65895@@ -404,7 +404,7 @@ static int aio_setup_ring(struct kioctx *ctx)
65896 size += sizeof(struct io_event) * nr_events;
65897
65898 nr_pages = PFN_UP(size);
65899- if (nr_pages < 0)
65900+ if (nr_pages <= 0)
65901 return -EINVAL;
65902
65903 file = aio_private_file(ctx, nr_pages);
65904diff --git a/fs/attr.c b/fs/attr.c
65905index 6530ced..4a827e2 100644
65906--- a/fs/attr.c
65907+++ b/fs/attr.c
65908@@ -102,6 +102,7 @@ int inode_newsize_ok(const struct inode *inode, loff_t offset)
65909 unsigned long limit;
65910
65911 limit = rlimit(RLIMIT_FSIZE);
65912+ gr_learn_resource(current, RLIMIT_FSIZE, (unsigned long)offset, 1);
65913 if (limit != RLIM_INFINITY && offset > limit)
65914 goto out_sig;
65915 if (offset > inode->i_sb->s_maxbytes)
65916diff --git a/fs/autofs4/waitq.c b/fs/autofs4/waitq.c
65917index 116fd38..c04182da 100644
65918--- a/fs/autofs4/waitq.c
65919+++ b/fs/autofs4/waitq.c
65920@@ -59,7 +59,7 @@ static int autofs4_write(struct autofs_sb_info *sbi,
65921 {
65922 unsigned long sigpipe, flags;
65923 mm_segment_t fs;
65924- const char *data = (const char *)addr;
65925+ const char __user *data = (const char __force_user *)addr;
65926 ssize_t wr = 0;
65927
65928 sigpipe = sigismember(&current->pending.signal, SIGPIPE);
65929@@ -340,6 +340,10 @@ static int validate_request(struct autofs_wait_queue **wait,
65930 return 1;
65931 }
65932
65933+#ifdef CONFIG_GRKERNSEC_HIDESYM
65934+static atomic_unchecked_t autofs_dummy_name_id = ATOMIC_INIT(0);
65935+#endif
65936+
65937 int autofs4_wait(struct autofs_sb_info *sbi, struct dentry *dentry,
65938 enum autofs_notify notify)
65939 {
65940@@ -385,7 +389,12 @@ int autofs4_wait(struct autofs_sb_info *sbi, struct dentry *dentry,
65941
65942 /* If this is a direct mount request create a dummy name */
65943 if (IS_ROOT(dentry) && autofs_type_trigger(sbi->type))
65944+#ifdef CONFIG_GRKERNSEC_HIDESYM
65945+ /* this name does get written to userland via autofs4_write() */
65946+ qstr.len = sprintf(name, "%08x", atomic_inc_return_unchecked(&autofs_dummy_name_id));
65947+#else
65948 qstr.len = sprintf(name, "%p", dentry);
65949+#endif
65950 else {
65951 qstr.len = autofs4_getpath(sbi, dentry, &name);
65952 if (!qstr.len) {
65953diff --git a/fs/befs/endian.h b/fs/befs/endian.h
65954index 2722387..56059b5 100644
65955--- a/fs/befs/endian.h
65956+++ b/fs/befs/endian.h
65957@@ -11,7 +11,7 @@
65958
65959 #include <asm/byteorder.h>
65960
65961-static inline u64
65962+static inline u64 __intentional_overflow(-1)
65963 fs64_to_cpu(const struct super_block *sb, fs64 n)
65964 {
65965 if (BEFS_SB(sb)->byte_order == BEFS_BYTESEX_LE)
65966@@ -29,7 +29,7 @@ cpu_to_fs64(const struct super_block *sb, u64 n)
65967 return (__force fs64)cpu_to_be64(n);
65968 }
65969
65970-static inline u32
65971+static inline u32 __intentional_overflow(-1)
65972 fs32_to_cpu(const struct super_block *sb, fs32 n)
65973 {
65974 if (BEFS_SB(sb)->byte_order == BEFS_BYTESEX_LE)
65975@@ -47,7 +47,7 @@ cpu_to_fs32(const struct super_block *sb, u32 n)
65976 return (__force fs32)cpu_to_be32(n);
65977 }
65978
65979-static inline u16
65980+static inline u16 __intentional_overflow(-1)
65981 fs16_to_cpu(const struct super_block *sb, fs16 n)
65982 {
65983 if (BEFS_SB(sb)->byte_order == BEFS_BYTESEX_LE)
65984diff --git a/fs/binfmt_aout.c b/fs/binfmt_aout.c
65985index 4c55668..eeae150 100644
65986--- a/fs/binfmt_aout.c
65987+++ b/fs/binfmt_aout.c
65988@@ -16,6 +16,7 @@
65989 #include <linux/string.h>
65990 #include <linux/fs.h>
65991 #include <linux/file.h>
65992+#include <linux/security.h>
65993 #include <linux/stat.h>
65994 #include <linux/fcntl.h>
65995 #include <linux/ptrace.h>
65996@@ -58,6 +59,8 @@ static int aout_core_dump(struct coredump_params *cprm)
65997 #endif
65998 # define START_STACK(u) ((void __user *)u.start_stack)
65999
66000+ memset(&dump, 0, sizeof(dump));
66001+
66002 fs = get_fs();
66003 set_fs(KERNEL_DS);
66004 has_dumped = 1;
66005@@ -68,10 +71,12 @@ static int aout_core_dump(struct coredump_params *cprm)
66006
66007 /* If the size of the dump file exceeds the rlimit, then see what would happen
66008 if we wrote the stack, but not the data area. */
66009+ gr_learn_resource(current, RLIMIT_CORE, (dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE, 1);
66010 if ((dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE > cprm->limit)
66011 dump.u_dsize = 0;
66012
66013 /* Make sure we have enough room to write the stack and data areas. */
66014+ gr_learn_resource(current, RLIMIT_CORE, (dump.u_ssize + 1) * PAGE_SIZE, 1);
66015 if ((dump.u_ssize + 1) * PAGE_SIZE > cprm->limit)
66016 dump.u_ssize = 0;
66017
66018@@ -232,6 +237,8 @@ static int load_aout_binary(struct linux_binprm * bprm)
66019 rlim = rlimit(RLIMIT_DATA);
66020 if (rlim >= RLIM_INFINITY)
66021 rlim = ~0;
66022+
66023+ gr_learn_resource(current, RLIMIT_DATA, ex.a_data + ex.a_bss, 1);
66024 if (ex.a_data + ex.a_bss > rlim)
66025 return -ENOMEM;
66026
66027@@ -261,6 +268,27 @@ static int load_aout_binary(struct linux_binprm * bprm)
66028
66029 install_exec_creds(bprm);
66030
66031+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
66032+ current->mm->pax_flags = 0UL;
66033+#endif
66034+
66035+#ifdef CONFIG_PAX_PAGEEXEC
66036+ if (!(N_FLAGS(ex) & F_PAX_PAGEEXEC)) {
66037+ current->mm->pax_flags |= MF_PAX_PAGEEXEC;
66038+
66039+#ifdef CONFIG_PAX_EMUTRAMP
66040+ if (N_FLAGS(ex) & F_PAX_EMUTRAMP)
66041+ current->mm->pax_flags |= MF_PAX_EMUTRAMP;
66042+#endif
66043+
66044+#ifdef CONFIG_PAX_MPROTECT
66045+ if (!(N_FLAGS(ex) & F_PAX_MPROTECT))
66046+ current->mm->pax_flags |= MF_PAX_MPROTECT;
66047+#endif
66048+
66049+ }
66050+#endif
66051+
66052 if (N_MAGIC(ex) == OMAGIC) {
66053 unsigned long text_addr, map_size;
66054 loff_t pos;
66055@@ -312,7 +340,7 @@ static int load_aout_binary(struct linux_binprm * bprm)
66056 return error;
66057
66058 error = vm_mmap(bprm->file, N_DATADDR(ex), ex.a_data,
66059- PROT_READ | PROT_WRITE | PROT_EXEC,
66060+ PROT_READ | PROT_WRITE,
66061 MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE | MAP_EXECUTABLE,
66062 fd_offset + ex.a_text);
66063 if (error != N_DATADDR(ex))
66064diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
66065index d925f55..d31f527 100644
66066--- a/fs/binfmt_elf.c
66067+++ b/fs/binfmt_elf.c
66068@@ -34,6 +34,7 @@
66069 #include <linux/utsname.h>
66070 #include <linux/coredump.h>
66071 #include <linux/sched.h>
66072+#include <linux/xattr.h>
66073 #include <asm/uaccess.h>
66074 #include <asm/param.h>
66075 #include <asm/page.h>
66076@@ -47,7 +48,7 @@
66077
66078 static int load_elf_binary(struct linux_binprm *bprm);
66079 static unsigned long elf_map(struct file *, unsigned long, struct elf_phdr *,
66080- int, int, unsigned long);
66081+ int, int, unsigned long) __intentional_overflow(-1);
66082
66083 #ifdef CONFIG_USELIB
66084 static int load_elf_library(struct file *);
66085@@ -65,6 +66,14 @@ static int elf_core_dump(struct coredump_params *cprm);
66086 #define elf_core_dump NULL
66087 #endif
66088
66089+#ifdef CONFIG_PAX_MPROTECT
66090+static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags);
66091+#endif
66092+
66093+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
66094+static void elf_handle_mmap(struct file *file);
66095+#endif
66096+
66097 #if ELF_EXEC_PAGESIZE > PAGE_SIZE
66098 #define ELF_MIN_ALIGN ELF_EXEC_PAGESIZE
66099 #else
66100@@ -84,6 +93,15 @@ static struct linux_binfmt elf_format = {
66101 .load_binary = load_elf_binary,
66102 .load_shlib = load_elf_library,
66103 .core_dump = elf_core_dump,
66104+
66105+#ifdef CONFIG_PAX_MPROTECT
66106+ .handle_mprotect= elf_handle_mprotect,
66107+#endif
66108+
66109+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
66110+ .handle_mmap = elf_handle_mmap,
66111+#endif
66112+
66113 .min_coredump = ELF_EXEC_PAGESIZE,
66114 };
66115
66116@@ -91,6 +109,8 @@ static struct linux_binfmt elf_format = {
66117
66118 static int set_brk(unsigned long start, unsigned long end)
66119 {
66120+ unsigned long e = end;
66121+
66122 start = ELF_PAGEALIGN(start);
66123 end = ELF_PAGEALIGN(end);
66124 if (end > start) {
66125@@ -99,7 +119,7 @@ static int set_brk(unsigned long start, unsigned long end)
66126 if (BAD_ADDR(addr))
66127 return addr;
66128 }
66129- current->mm->start_brk = current->mm->brk = end;
66130+ current->mm->start_brk = current->mm->brk = e;
66131 return 0;
66132 }
66133
66134@@ -160,12 +180,13 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
66135 elf_addr_t __user *u_rand_bytes;
66136 const char *k_platform = ELF_PLATFORM;
66137 const char *k_base_platform = ELF_BASE_PLATFORM;
66138- unsigned char k_rand_bytes[16];
66139+ u32 k_rand_bytes[4];
66140 int items;
66141 elf_addr_t *elf_info;
66142 int ei_index = 0;
66143 const struct cred *cred = current_cred();
66144 struct vm_area_struct *vma;
66145+ unsigned long saved_auxv[AT_VECTOR_SIZE];
66146
66147 /*
66148 * In some cases (e.g. Hyper-Threading), we want to avoid L1
66149@@ -207,8 +228,12 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
66150 * Generate 16 random bytes for userspace PRNG seeding.
66151 */
66152 get_random_bytes(k_rand_bytes, sizeof(k_rand_bytes));
66153- u_rand_bytes = (elf_addr_t __user *)
66154- STACK_ALLOC(p, sizeof(k_rand_bytes));
66155+ prandom_seed(k_rand_bytes[0] ^ prandom_u32());
66156+ prandom_seed(k_rand_bytes[1] ^ prandom_u32());
66157+ prandom_seed(k_rand_bytes[2] ^ prandom_u32());
66158+ prandom_seed(k_rand_bytes[3] ^ prandom_u32());
66159+ p = STACK_ROUND(p, sizeof(k_rand_bytes));
66160+ u_rand_bytes = (elf_addr_t __user *) p;
66161 if (__copy_to_user(u_rand_bytes, k_rand_bytes, sizeof(k_rand_bytes)))
66162 return -EFAULT;
66163
66164@@ -323,9 +348,11 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
66165 return -EFAULT;
66166 current->mm->env_end = p;
66167
66168+ memcpy(saved_auxv, elf_info, ei_index * sizeof(elf_addr_t));
66169+
66170 /* Put the elf_info on the stack in the right place. */
66171 sp = (elf_addr_t __user *)envp + 1;
66172- if (copy_to_user(sp, elf_info, ei_index * sizeof(elf_addr_t)))
66173+ if (copy_to_user(sp, saved_auxv, ei_index * sizeof(elf_addr_t)))
66174 return -EFAULT;
66175 return 0;
66176 }
66177@@ -514,14 +541,14 @@ static inline int arch_check_elf(struct elfhdr *ehdr, bool has_interp,
66178 an ELF header */
66179
66180 static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
66181- struct file *interpreter, unsigned long *interp_map_addr,
66182+ struct file *interpreter,
66183 unsigned long no_base, struct elf_phdr *interp_elf_phdata)
66184 {
66185 struct elf_phdr *eppnt;
66186- unsigned long load_addr = 0;
66187+ unsigned long load_addr = 0, pax_task_size = TASK_SIZE;
66188 int load_addr_set = 0;
66189 unsigned long last_bss = 0, elf_bss = 0;
66190- unsigned long error = ~0UL;
66191+ unsigned long error = -EINVAL;
66192 unsigned long total_size;
66193 int i;
66194
66195@@ -541,6 +568,11 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
66196 goto out;
66197 }
66198
66199+#ifdef CONFIG_PAX_SEGMEXEC
66200+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
66201+ pax_task_size = SEGMEXEC_TASK_SIZE;
66202+#endif
66203+
66204 eppnt = interp_elf_phdata;
66205 for (i = 0; i < interp_elf_ex->e_phnum; i++, eppnt++) {
66206 if (eppnt->p_type == PT_LOAD) {
66207@@ -564,8 +596,6 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
66208 map_addr = elf_map(interpreter, load_addr + vaddr,
66209 eppnt, elf_prot, elf_type, total_size);
66210 total_size = 0;
66211- if (!*interp_map_addr)
66212- *interp_map_addr = map_addr;
66213 error = map_addr;
66214 if (BAD_ADDR(map_addr))
66215 goto out;
66216@@ -584,8 +614,8 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
66217 k = load_addr + eppnt->p_vaddr;
66218 if (BAD_ADDR(k) ||
66219 eppnt->p_filesz > eppnt->p_memsz ||
66220- eppnt->p_memsz > TASK_SIZE ||
66221- TASK_SIZE - eppnt->p_memsz < k) {
66222+ eppnt->p_memsz > pax_task_size ||
66223+ pax_task_size - eppnt->p_memsz < k) {
66224 error = -ENOMEM;
66225 goto out;
66226 }
66227@@ -624,9 +654,11 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
66228 elf_bss = ELF_PAGESTART(elf_bss + ELF_MIN_ALIGN - 1);
66229
66230 /* Map the last of the bss segment */
66231- error = vm_brk(elf_bss, last_bss - elf_bss);
66232- if (BAD_ADDR(error))
66233- goto out;
66234+ if (last_bss > elf_bss) {
66235+ error = vm_brk(elf_bss, last_bss - elf_bss);
66236+ if (BAD_ADDR(error))
66237+ goto out;
66238+ }
66239 }
66240
66241 error = load_addr;
66242@@ -634,6 +666,336 @@ out:
66243 return error;
66244 }
66245
66246+#ifdef CONFIG_PAX_PT_PAX_FLAGS
66247+#ifdef CONFIG_PAX_SOFTMODE
66248+static unsigned long pax_parse_pt_pax_softmode(const struct elf_phdr * const elf_phdata)
66249+{
66250+ unsigned long pax_flags = 0UL;
66251+
66252+#ifdef CONFIG_PAX_PAGEEXEC
66253+ if (elf_phdata->p_flags & PF_PAGEEXEC)
66254+ pax_flags |= MF_PAX_PAGEEXEC;
66255+#endif
66256+
66257+#ifdef CONFIG_PAX_SEGMEXEC
66258+ if (elf_phdata->p_flags & PF_SEGMEXEC)
66259+ pax_flags |= MF_PAX_SEGMEXEC;
66260+#endif
66261+
66262+#ifdef CONFIG_PAX_EMUTRAMP
66263+ if ((elf_phdata->p_flags & PF_EMUTRAMP) && (pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)))
66264+ pax_flags |= MF_PAX_EMUTRAMP;
66265+#endif
66266+
66267+#ifdef CONFIG_PAX_MPROTECT
66268+ if (elf_phdata->p_flags & PF_MPROTECT)
66269+ pax_flags |= MF_PAX_MPROTECT;
66270+#endif
66271+
66272+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
66273+ if (randomize_va_space && (elf_phdata->p_flags & PF_RANDMMAP))
66274+ pax_flags |= MF_PAX_RANDMMAP;
66275+#endif
66276+
66277+ return pax_flags;
66278+}
66279+#endif
66280+
66281+static unsigned long pax_parse_pt_pax_hardmode(const struct elf_phdr * const elf_phdata)
66282+{
66283+ unsigned long pax_flags = 0UL;
66284+
66285+#ifdef CONFIG_PAX_PAGEEXEC
66286+ if (!(elf_phdata->p_flags & PF_NOPAGEEXEC))
66287+ pax_flags |= MF_PAX_PAGEEXEC;
66288+#endif
66289+
66290+#ifdef CONFIG_PAX_SEGMEXEC
66291+ if (!(elf_phdata->p_flags & PF_NOSEGMEXEC))
66292+ pax_flags |= MF_PAX_SEGMEXEC;
66293+#endif
66294+
66295+#ifdef CONFIG_PAX_EMUTRAMP
66296+ if (!(elf_phdata->p_flags & PF_NOEMUTRAMP))
66297+ pax_flags |= MF_PAX_EMUTRAMP;
66298+#endif
66299+
66300+#ifdef CONFIG_PAX_MPROTECT
66301+ if (!(elf_phdata->p_flags & PF_NOMPROTECT))
66302+ pax_flags |= MF_PAX_MPROTECT;
66303+#endif
66304+
66305+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
66306+ if (randomize_va_space && !(elf_phdata->p_flags & PF_NORANDMMAP))
66307+ pax_flags |= MF_PAX_RANDMMAP;
66308+#endif
66309+
66310+ return pax_flags;
66311+}
66312+#endif
66313+
66314+#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
66315+#ifdef CONFIG_PAX_SOFTMODE
66316+static unsigned long pax_parse_xattr_pax_softmode(unsigned long pax_flags_softmode)
66317+{
66318+ unsigned long pax_flags = 0UL;
66319+
66320+#ifdef CONFIG_PAX_PAGEEXEC
66321+ if (pax_flags_softmode & MF_PAX_PAGEEXEC)
66322+ pax_flags |= MF_PAX_PAGEEXEC;
66323+#endif
66324+
66325+#ifdef CONFIG_PAX_SEGMEXEC
66326+ if (pax_flags_softmode & MF_PAX_SEGMEXEC)
66327+ pax_flags |= MF_PAX_SEGMEXEC;
66328+#endif
66329+
66330+#ifdef CONFIG_PAX_EMUTRAMP
66331+ if ((pax_flags_softmode & MF_PAX_EMUTRAMP) && (pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)))
66332+ pax_flags |= MF_PAX_EMUTRAMP;
66333+#endif
66334+
66335+#ifdef CONFIG_PAX_MPROTECT
66336+ if (pax_flags_softmode & MF_PAX_MPROTECT)
66337+ pax_flags |= MF_PAX_MPROTECT;
66338+#endif
66339+
66340+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
66341+ if (randomize_va_space && (pax_flags_softmode & MF_PAX_RANDMMAP))
66342+ pax_flags |= MF_PAX_RANDMMAP;
66343+#endif
66344+
66345+ return pax_flags;
66346+}
66347+#endif
66348+
66349+static unsigned long pax_parse_xattr_pax_hardmode(unsigned long pax_flags_hardmode)
66350+{
66351+ unsigned long pax_flags = 0UL;
66352+
66353+#ifdef CONFIG_PAX_PAGEEXEC
66354+ if (!(pax_flags_hardmode & MF_PAX_PAGEEXEC))
66355+ pax_flags |= MF_PAX_PAGEEXEC;
66356+#endif
66357+
66358+#ifdef CONFIG_PAX_SEGMEXEC
66359+ if (!(pax_flags_hardmode & MF_PAX_SEGMEXEC))
66360+ pax_flags |= MF_PAX_SEGMEXEC;
66361+#endif
66362+
66363+#ifdef CONFIG_PAX_EMUTRAMP
66364+ if (!(pax_flags_hardmode & MF_PAX_EMUTRAMP))
66365+ pax_flags |= MF_PAX_EMUTRAMP;
66366+#endif
66367+
66368+#ifdef CONFIG_PAX_MPROTECT
66369+ if (!(pax_flags_hardmode & MF_PAX_MPROTECT))
66370+ pax_flags |= MF_PAX_MPROTECT;
66371+#endif
66372+
66373+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
66374+ if (randomize_va_space && !(pax_flags_hardmode & MF_PAX_RANDMMAP))
66375+ pax_flags |= MF_PAX_RANDMMAP;
66376+#endif
66377+
66378+ return pax_flags;
66379+}
66380+#endif
66381+
66382+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
66383+static unsigned long pax_parse_defaults(void)
66384+{
66385+ unsigned long pax_flags = 0UL;
66386+
66387+#ifdef CONFIG_PAX_SOFTMODE
66388+ if (pax_softmode)
66389+ return pax_flags;
66390+#endif
66391+
66392+#ifdef CONFIG_PAX_PAGEEXEC
66393+ pax_flags |= MF_PAX_PAGEEXEC;
66394+#endif
66395+
66396+#ifdef CONFIG_PAX_SEGMEXEC
66397+ pax_flags |= MF_PAX_SEGMEXEC;
66398+#endif
66399+
66400+#ifdef CONFIG_PAX_MPROTECT
66401+ pax_flags |= MF_PAX_MPROTECT;
66402+#endif
66403+
66404+#ifdef CONFIG_PAX_RANDMMAP
66405+ if (randomize_va_space)
66406+ pax_flags |= MF_PAX_RANDMMAP;
66407+#endif
66408+
66409+ return pax_flags;
66410+}
66411+
66412+static unsigned long pax_parse_ei_pax(const struct elfhdr * const elf_ex)
66413+{
66414+ unsigned long pax_flags = PAX_PARSE_FLAGS_FALLBACK;
66415+
66416+#ifdef CONFIG_PAX_EI_PAX
66417+
66418+#ifdef CONFIG_PAX_SOFTMODE
66419+ if (pax_softmode)
66420+ return pax_flags;
66421+#endif
66422+
66423+ pax_flags = 0UL;
66424+
66425+#ifdef CONFIG_PAX_PAGEEXEC
66426+ if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_PAGEEXEC))
66427+ pax_flags |= MF_PAX_PAGEEXEC;
66428+#endif
66429+
66430+#ifdef CONFIG_PAX_SEGMEXEC
66431+ if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_SEGMEXEC))
66432+ pax_flags |= MF_PAX_SEGMEXEC;
66433+#endif
66434+
66435+#ifdef CONFIG_PAX_EMUTRAMP
66436+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && (elf_ex->e_ident[EI_PAX] & EF_PAX_EMUTRAMP))
66437+ pax_flags |= MF_PAX_EMUTRAMP;
66438+#endif
66439+
66440+#ifdef CONFIG_PAX_MPROTECT
66441+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && !(elf_ex->e_ident[EI_PAX] & EF_PAX_MPROTECT))
66442+ pax_flags |= MF_PAX_MPROTECT;
66443+#endif
66444+
66445+#ifdef CONFIG_PAX_ASLR
66446+ if (randomize_va_space && !(elf_ex->e_ident[EI_PAX] & EF_PAX_RANDMMAP))
66447+ pax_flags |= MF_PAX_RANDMMAP;
66448+#endif
66449+
66450+#endif
66451+
66452+ return pax_flags;
66453+
66454+}
66455+
66456+static unsigned long pax_parse_pt_pax(const struct elfhdr * const elf_ex, const struct elf_phdr * const elf_phdata)
66457+{
66458+
66459+#ifdef CONFIG_PAX_PT_PAX_FLAGS
66460+ unsigned long i;
66461+
66462+ for (i = 0UL; i < elf_ex->e_phnum; i++)
66463+ if (elf_phdata[i].p_type == PT_PAX_FLAGS) {
66464+ if (((elf_phdata[i].p_flags & PF_PAGEEXEC) && (elf_phdata[i].p_flags & PF_NOPAGEEXEC)) ||
66465+ ((elf_phdata[i].p_flags & PF_SEGMEXEC) && (elf_phdata[i].p_flags & PF_NOSEGMEXEC)) ||
66466+ ((elf_phdata[i].p_flags & PF_EMUTRAMP) && (elf_phdata[i].p_flags & PF_NOEMUTRAMP)) ||
66467+ ((elf_phdata[i].p_flags & PF_MPROTECT) && (elf_phdata[i].p_flags & PF_NOMPROTECT)) ||
66468+ ((elf_phdata[i].p_flags & PF_RANDMMAP) && (elf_phdata[i].p_flags & PF_NORANDMMAP)))
66469+ return PAX_PARSE_FLAGS_FALLBACK;
66470+
66471+#ifdef CONFIG_PAX_SOFTMODE
66472+ if (pax_softmode)
66473+ return pax_parse_pt_pax_softmode(&elf_phdata[i]);
66474+ else
66475+#endif
66476+
66477+ return pax_parse_pt_pax_hardmode(&elf_phdata[i]);
66478+ break;
66479+ }
66480+#endif
66481+
66482+ return PAX_PARSE_FLAGS_FALLBACK;
66483+}
66484+
66485+static unsigned long pax_parse_xattr_pax(struct file * const file)
66486+{
66487+
66488+#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
66489+ ssize_t xattr_size, i;
66490+ unsigned char xattr_value[sizeof("pemrs") - 1];
66491+ unsigned long pax_flags_hardmode = 0UL, pax_flags_softmode = 0UL;
66492+
66493+ xattr_size = pax_getxattr(file->f_path.dentry, xattr_value, sizeof xattr_value);
66494+ if (xattr_size < 0 || xattr_size > sizeof xattr_value)
66495+ return PAX_PARSE_FLAGS_FALLBACK;
66496+
66497+ for (i = 0; i < xattr_size; i++)
66498+ switch (xattr_value[i]) {
66499+ default:
66500+ return PAX_PARSE_FLAGS_FALLBACK;
66501+
66502+#define parse_flag(option1, option2, flag) \
66503+ case option1: \
66504+ if (pax_flags_hardmode & MF_PAX_##flag) \
66505+ return PAX_PARSE_FLAGS_FALLBACK;\
66506+ pax_flags_hardmode |= MF_PAX_##flag; \
66507+ break; \
66508+ case option2: \
66509+ if (pax_flags_softmode & MF_PAX_##flag) \
66510+ return PAX_PARSE_FLAGS_FALLBACK;\
66511+ pax_flags_softmode |= MF_PAX_##flag; \
66512+ break;
66513+
66514+ parse_flag('p', 'P', PAGEEXEC);
66515+ parse_flag('e', 'E', EMUTRAMP);
66516+ parse_flag('m', 'M', MPROTECT);
66517+ parse_flag('r', 'R', RANDMMAP);
66518+ parse_flag('s', 'S', SEGMEXEC);
66519+
66520+#undef parse_flag
66521+ }
66522+
66523+ if (pax_flags_hardmode & pax_flags_softmode)
66524+ return PAX_PARSE_FLAGS_FALLBACK;
66525+
66526+#ifdef CONFIG_PAX_SOFTMODE
66527+ if (pax_softmode)
66528+ return pax_parse_xattr_pax_softmode(pax_flags_softmode);
66529+ else
66530+#endif
66531+
66532+ return pax_parse_xattr_pax_hardmode(pax_flags_hardmode);
66533+#else
66534+ return PAX_PARSE_FLAGS_FALLBACK;
66535+#endif
66536+
66537+}
66538+
66539+static long pax_parse_pax_flags(const struct elfhdr * const elf_ex, const struct elf_phdr * const elf_phdata, struct file * const file)
66540+{
66541+ unsigned long pax_flags, ei_pax_flags, pt_pax_flags, xattr_pax_flags;
66542+
66543+ pax_flags = pax_parse_defaults();
66544+ ei_pax_flags = pax_parse_ei_pax(elf_ex);
66545+ pt_pax_flags = pax_parse_pt_pax(elf_ex, elf_phdata);
66546+ xattr_pax_flags = pax_parse_xattr_pax(file);
66547+
66548+ if (pt_pax_flags != PAX_PARSE_FLAGS_FALLBACK &&
66549+ xattr_pax_flags != PAX_PARSE_FLAGS_FALLBACK &&
66550+ pt_pax_flags != xattr_pax_flags)
66551+ return -EINVAL;
66552+ if (xattr_pax_flags != PAX_PARSE_FLAGS_FALLBACK)
66553+ pax_flags = xattr_pax_flags;
66554+ else if (pt_pax_flags != PAX_PARSE_FLAGS_FALLBACK)
66555+ pax_flags = pt_pax_flags;
66556+ else if (ei_pax_flags != PAX_PARSE_FLAGS_FALLBACK)
66557+ pax_flags = ei_pax_flags;
66558+
66559+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
66560+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
66561+ if ((__supported_pte_mask & _PAGE_NX))
66562+ pax_flags &= ~MF_PAX_SEGMEXEC;
66563+ else
66564+ pax_flags &= ~MF_PAX_PAGEEXEC;
66565+ }
66566+#endif
66567+
66568+ if (0 > pax_check_flags(&pax_flags))
66569+ return -EINVAL;
66570+
66571+ current->mm->pax_flags = pax_flags;
66572+ return 0;
66573+}
66574+#endif
66575+
66576 /*
66577 * These are the functions used to load ELF style executables and shared
66578 * libraries. There is no binary dependent code anywhere else.
66579@@ -647,6 +1009,11 @@ static unsigned long randomize_stack_top(unsigned long stack_top)
66580 {
66581 unsigned long random_variable = 0;
66582
66583+#ifdef CONFIG_PAX_RANDUSTACK
66584+ if (current->mm->pax_flags & MF_PAX_RANDMMAP)
66585+ return stack_top - current->mm->delta_stack;
66586+#endif
66587+
66588 if ((current->flags & PF_RANDOMIZE) &&
66589 !(current->personality & ADDR_NO_RANDOMIZE)) {
66590 random_variable = (unsigned long) get_random_int();
66591@@ -666,7 +1033,7 @@ static int load_elf_binary(struct linux_binprm *bprm)
66592 unsigned long load_addr = 0, load_bias = 0;
66593 int load_addr_set = 0;
66594 char * elf_interpreter = NULL;
66595- unsigned long error;
66596+ unsigned long error = 0;
66597 struct elf_phdr *elf_ppnt, *elf_phdata, *interp_elf_phdata = NULL;
66598 unsigned long elf_bss, elf_brk;
66599 int retval, i;
66600@@ -681,6 +1048,7 @@ static int load_elf_binary(struct linux_binprm *bprm)
66601 struct elfhdr interp_elf_ex;
66602 } *loc;
66603 struct arch_elf_state arch_state = INIT_ARCH_ELF_STATE;
66604+ unsigned long pax_task_size;
66605
66606 loc = kmalloc(sizeof(*loc), GFP_KERNEL);
66607 if (!loc) {
66608@@ -839,6 +1207,77 @@ static int load_elf_binary(struct linux_binprm *bprm)
66609 /* Do this immediately, since STACK_TOP as used in setup_arg_pages
66610 may depend on the personality. */
66611 SET_PERSONALITY2(loc->elf_ex, &arch_state);
66612+
66613+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
66614+ current->mm->pax_flags = 0UL;
66615+#endif
66616+
66617+#ifdef CONFIG_PAX_DLRESOLVE
66618+ current->mm->call_dl_resolve = 0UL;
66619+#endif
66620+
66621+#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
66622+ current->mm->call_syscall = 0UL;
66623+#endif
66624+
66625+#ifdef CONFIG_PAX_ASLR
66626+ current->mm->delta_mmap = 0UL;
66627+ current->mm->delta_stack = 0UL;
66628+#endif
66629+
66630+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
66631+ if (0 > pax_parse_pax_flags(&loc->elf_ex, elf_phdata, bprm->file)) {
66632+ send_sig(SIGKILL, current, 0);
66633+ goto out_free_dentry;
66634+ }
66635+#endif
66636+
66637+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
66638+ pax_set_initial_flags(bprm);
66639+#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
66640+ if (pax_set_initial_flags_func)
66641+ (pax_set_initial_flags_func)(bprm);
66642+#endif
66643+
66644+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
66645+ if ((current->mm->pax_flags & MF_PAX_PAGEEXEC) && !(__supported_pte_mask & _PAGE_NX)) {
66646+ current->mm->context.user_cs_limit = PAGE_SIZE;
66647+ current->mm->def_flags |= VM_PAGEEXEC | VM_NOHUGEPAGE;
66648+ }
66649+#endif
66650+
66651+#ifdef CONFIG_PAX_SEGMEXEC
66652+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
66653+ current->mm->context.user_cs_base = SEGMEXEC_TASK_SIZE;
66654+ current->mm->context.user_cs_limit = TASK_SIZE-SEGMEXEC_TASK_SIZE;
66655+ pax_task_size = SEGMEXEC_TASK_SIZE;
66656+ current->mm->def_flags |= VM_NOHUGEPAGE;
66657+ } else
66658+#endif
66659+
66660+ pax_task_size = TASK_SIZE;
66661+
66662+#if defined(CONFIG_ARCH_TRACK_EXEC_LIMIT) || defined(CONFIG_PAX_SEGMEXEC)
66663+ if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
66664+ set_user_cs(current->mm->context.user_cs_base, current->mm->context.user_cs_limit, get_cpu());
66665+ put_cpu();
66666+ }
66667+#endif
66668+
66669+#ifdef CONFIG_PAX_ASLR
66670+ if (current->mm->pax_flags & MF_PAX_RANDMMAP) {
66671+ current->mm->delta_mmap = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN)-1)) << PAGE_SHIFT;
66672+ current->mm->delta_stack = (pax_get_random_long() & ((1UL << PAX_DELTA_STACK_LEN)-1)) << PAGE_SHIFT;
66673+ }
66674+#endif
66675+
66676+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
66677+ if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
66678+ executable_stack = EXSTACK_DISABLE_X;
66679+ current->personality &= ~READ_IMPLIES_EXEC;
66680+ } else
66681+#endif
66682+
66683 if (elf_read_implies_exec(loc->elf_ex, executable_stack))
66684 current->personality |= READ_IMPLIES_EXEC;
66685
66686@@ -925,12 +1364,21 @@ static int load_elf_binary(struct linux_binprm *bprm)
66687 #else
66688 load_bias = ELF_PAGESTART(ELF_ET_DYN_BASE - vaddr);
66689 #endif
66690- total_size = total_mapping_size(elf_phdata,
66691- loc->elf_ex.e_phnum);
66692- if (!total_size) {
66693- error = -EINVAL;
66694- goto out_free_dentry;
66695+
66696+#ifdef CONFIG_PAX_RANDMMAP
66697+ /* PaX: randomize base address at the default exe base if requested */
66698+ if ((current->mm->pax_flags & MF_PAX_RANDMMAP) && elf_interpreter) {
66699+#ifdef CONFIG_SPARC64
66700+ load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << (PAGE_SHIFT+1);
66701+#else
66702+ load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << PAGE_SHIFT;
66703+#endif
66704+ load_bias = ELF_PAGESTART(PAX_ELF_ET_DYN_BASE - vaddr + load_bias);
66705+ elf_flags |= MAP_FIXED;
66706 }
66707+#endif
66708+
66709+ total_size = total_mapping_size(elf_phdata, loc->elf_ex.e_phnum);
66710 }
66711
66712 error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt,
66713@@ -962,9 +1410,9 @@ static int load_elf_binary(struct linux_binprm *bprm)
66714 * allowed task size. Note that p_filesz must always be
66715 * <= p_memsz so it is only necessary to check p_memsz.
66716 */
66717- if (BAD_ADDR(k) || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
66718- elf_ppnt->p_memsz > TASK_SIZE ||
66719- TASK_SIZE - elf_ppnt->p_memsz < k) {
66720+ if (k >= pax_task_size || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
66721+ elf_ppnt->p_memsz > pax_task_size ||
66722+ pax_task_size - elf_ppnt->p_memsz < k) {
66723 /* set_brk can never work. Avoid overflows. */
66724 retval = -EINVAL;
66725 goto out_free_dentry;
66726@@ -1000,16 +1448,43 @@ static int load_elf_binary(struct linux_binprm *bprm)
66727 if (retval)
66728 goto out_free_dentry;
66729 if (likely(elf_bss != elf_brk) && unlikely(padzero(elf_bss))) {
66730- retval = -EFAULT; /* Nobody gets to see this, but.. */
66731- goto out_free_dentry;
66732+ /*
66733+ * This bss-zeroing can fail if the ELF
66734+ * file specifies odd protections. So
66735+ * we don't check the return value
66736+ */
66737 }
66738
66739+#ifdef CONFIG_PAX_RANDMMAP
66740+ if (current->mm->pax_flags & MF_PAX_RANDMMAP) {
66741+ unsigned long start, size, flags;
66742+ vm_flags_t vm_flags;
66743+
66744+ start = ELF_PAGEALIGN(elf_brk);
66745+ size = PAGE_SIZE + ((pax_get_random_long() & ((1UL << 22) - 1UL)) << 4);
66746+ flags = MAP_FIXED | MAP_PRIVATE;
66747+ vm_flags = VM_DONTEXPAND | VM_DONTDUMP;
66748+
66749+ down_write(&current->mm->mmap_sem);
66750+ start = get_unmapped_area(NULL, start, PAGE_ALIGN(size), 0, flags);
66751+ retval = -ENOMEM;
66752+ if (!IS_ERR_VALUE(start) && !find_vma_intersection(current->mm, start, start + size + PAGE_SIZE)) {
66753+// if (current->personality & ADDR_NO_RANDOMIZE)
66754+// vm_flags |= VM_READ | VM_MAYREAD;
66755+ start = mmap_region(NULL, start, PAGE_ALIGN(size), vm_flags, 0);
66756+ retval = IS_ERR_VALUE(start) ? start : 0;
66757+ }
66758+ up_write(&current->mm->mmap_sem);
66759+ if (retval == 0)
66760+ retval = set_brk(start + size, start + size + PAGE_SIZE);
66761+ if (retval < 0)
66762+ goto out_free_dentry;
66763+ }
66764+#endif
66765+
66766 if (elf_interpreter) {
66767- unsigned long interp_map_addr = 0;
66768-
66769 elf_entry = load_elf_interp(&loc->interp_elf_ex,
66770 interpreter,
66771- &interp_map_addr,
66772 load_bias, interp_elf_phdata);
66773 if (!IS_ERR((void *)elf_entry)) {
66774 /*
66775@@ -1237,7 +1712,7 @@ static bool always_dump_vma(struct vm_area_struct *vma)
66776 * Decide what to dump of a segment, part, all or none.
66777 */
66778 static unsigned long vma_dump_size(struct vm_area_struct *vma,
66779- unsigned long mm_flags)
66780+ unsigned long mm_flags, long signr)
66781 {
66782 #define FILTER(type) (mm_flags & (1UL << MMF_DUMP_##type))
66783
66784@@ -1275,7 +1750,7 @@ static unsigned long vma_dump_size(struct vm_area_struct *vma,
66785 if (vma->vm_file == NULL)
66786 return 0;
66787
66788- if (FILTER(MAPPED_PRIVATE))
66789+ if (signr == SIGKILL || FILTER(MAPPED_PRIVATE))
66790 goto whole;
66791
66792 /*
66793@@ -1482,9 +1957,9 @@ static void fill_auxv_note(struct memelfnote *note, struct mm_struct *mm)
66794 {
66795 elf_addr_t *auxv = (elf_addr_t *) mm->saved_auxv;
66796 int i = 0;
66797- do
66798+ do {
66799 i += 2;
66800- while (auxv[i - 2] != AT_NULL);
66801+ } while (auxv[i - 2] != AT_NULL);
66802 fill_note(note, "CORE", NT_AUXV, i * sizeof(elf_addr_t), auxv);
66803 }
66804
66805@@ -1493,7 +1968,7 @@ static void fill_siginfo_note(struct memelfnote *note, user_siginfo_t *csigdata,
66806 {
66807 mm_segment_t old_fs = get_fs();
66808 set_fs(KERNEL_DS);
66809- copy_siginfo_to_user((user_siginfo_t __user *) csigdata, siginfo);
66810+ copy_siginfo_to_user((user_siginfo_t __force_user *) csigdata, siginfo);
66811 set_fs(old_fs);
66812 fill_note(note, "CORE", NT_SIGINFO, sizeof(*csigdata), csigdata);
66813 }
66814@@ -2213,7 +2688,7 @@ static int elf_core_dump(struct coredump_params *cprm)
66815 vma = next_vma(vma, gate_vma)) {
66816 unsigned long dump_size;
66817
66818- dump_size = vma_dump_size(vma, cprm->mm_flags);
66819+ dump_size = vma_dump_size(vma, cprm->mm_flags, cprm->siginfo->si_signo);
66820 vma_filesz[i++] = dump_size;
66821 vma_data_size += dump_size;
66822 }
66823@@ -2321,6 +2796,167 @@ out:
66824
66825 #endif /* CONFIG_ELF_CORE */
66826
66827+#ifdef CONFIG_PAX_MPROTECT
66828+/* PaX: non-PIC ELF libraries need relocations on their executable segments
66829+ * therefore we'll grant them VM_MAYWRITE once during their life. Similarly
66830+ * we'll remove VM_MAYWRITE for good on RELRO segments.
66831+ *
66832+ * The checks favour ld-linux.so behaviour which operates on a per ELF segment
66833+ * basis because we want to allow the common case and not the special ones.
66834+ */
66835+static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags)
66836+{
66837+ struct elfhdr elf_h;
66838+ struct elf_phdr elf_p;
66839+ unsigned long i;
66840+ unsigned long oldflags;
66841+ bool is_textrel_rw, is_textrel_rx, is_relro;
66842+
66843+ if (!(vma->vm_mm->pax_flags & MF_PAX_MPROTECT) || !vma->vm_file)
66844+ return;
66845+
66846+ oldflags = vma->vm_flags & (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ);
66847+ newflags &= VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ;
66848+
66849+#ifdef CONFIG_PAX_ELFRELOCS
66850+ /* possible TEXTREL */
66851+ is_textrel_rw = !vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYREAD | VM_EXEC | VM_READ) && newflags == (VM_WRITE | VM_READ);
66852+ is_textrel_rx = vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_WRITE | VM_READ) && newflags == (VM_EXEC | VM_READ);
66853+#else
66854+ is_textrel_rw = false;
66855+ is_textrel_rx = false;
66856+#endif
66857+
66858+ /* possible RELRO */
66859+ is_relro = vma->anon_vma && oldflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ) && newflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ);
66860+
66861+ if (!is_textrel_rw && !is_textrel_rx && !is_relro)
66862+ return;
66863+
66864+ if (sizeof(elf_h) != kernel_read(vma->vm_file, 0UL, (char *)&elf_h, sizeof(elf_h)) ||
66865+ memcmp(elf_h.e_ident, ELFMAG, SELFMAG) ||
66866+
66867+#ifdef CONFIG_PAX_ETEXECRELOCS
66868+ ((is_textrel_rw || is_textrel_rx) && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
66869+#else
66870+ ((is_textrel_rw || is_textrel_rx) && elf_h.e_type != ET_DYN) ||
66871+#endif
66872+
66873+ (is_relro && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
66874+ !elf_check_arch(&elf_h) ||
66875+ elf_h.e_phentsize != sizeof(struct elf_phdr) ||
66876+ elf_h.e_phnum > 65536UL / sizeof(struct elf_phdr))
66877+ return;
66878+
66879+ for (i = 0UL; i < elf_h.e_phnum; i++) {
66880+ if (sizeof(elf_p) != kernel_read(vma->vm_file, elf_h.e_phoff + i*sizeof(elf_p), (char *)&elf_p, sizeof(elf_p)))
66881+ return;
66882+ switch (elf_p.p_type) {
66883+ case PT_DYNAMIC:
66884+ if (!is_textrel_rw && !is_textrel_rx)
66885+ continue;
66886+ i = 0UL;
66887+ while ((i+1) * sizeof(elf_dyn) <= elf_p.p_filesz) {
66888+ elf_dyn dyn;
66889+
66890+ if (sizeof(dyn) != kernel_read(vma->vm_file, elf_p.p_offset + i*sizeof(dyn), (char *)&dyn, sizeof(dyn)))
66891+ break;
66892+ if (dyn.d_tag == DT_NULL)
66893+ break;
66894+ if (dyn.d_tag == DT_TEXTREL || (dyn.d_tag == DT_FLAGS && (dyn.d_un.d_val & DF_TEXTREL))) {
66895+ gr_log_textrel(vma);
66896+ if (is_textrel_rw)
66897+ vma->vm_flags |= VM_MAYWRITE;
66898+ else
66899+ /* PaX: disallow write access after relocs are done, hopefully noone else needs it... */
66900+ vma->vm_flags &= ~VM_MAYWRITE;
66901+ break;
66902+ }
66903+ i++;
66904+ }
66905+ is_textrel_rw = false;
66906+ is_textrel_rx = false;
66907+ continue;
66908+
66909+ case PT_GNU_RELRO:
66910+ if (!is_relro)
66911+ continue;
66912+ if ((elf_p.p_offset >> PAGE_SHIFT) == vma->vm_pgoff && ELF_PAGEALIGN(elf_p.p_memsz) == vma->vm_end - vma->vm_start)
66913+ vma->vm_flags &= ~VM_MAYWRITE;
66914+ is_relro = false;
66915+ continue;
66916+
66917+#ifdef CONFIG_PAX_PT_PAX_FLAGS
66918+ case PT_PAX_FLAGS: {
66919+ const char *msg_mprotect = "", *msg_emutramp = "";
66920+ char *buffer_lib, *buffer_exe;
66921+
66922+ if (elf_p.p_flags & PF_NOMPROTECT)
66923+ msg_mprotect = "MPROTECT disabled";
66924+
66925+#ifdef CONFIG_PAX_EMUTRAMP
66926+ if (!(vma->vm_mm->pax_flags & MF_PAX_EMUTRAMP) && !(elf_p.p_flags & PF_NOEMUTRAMP))
66927+ msg_emutramp = "EMUTRAMP enabled";
66928+#endif
66929+
66930+ if (!msg_mprotect[0] && !msg_emutramp[0])
66931+ continue;
66932+
66933+ if (!printk_ratelimit())
66934+ continue;
66935+
66936+ buffer_lib = (char *)__get_free_page(GFP_KERNEL);
66937+ buffer_exe = (char *)__get_free_page(GFP_KERNEL);
66938+ if (buffer_lib && buffer_exe) {
66939+ char *path_lib, *path_exe;
66940+
66941+ path_lib = pax_get_path(&vma->vm_file->f_path, buffer_lib, PAGE_SIZE);
66942+ path_exe = pax_get_path(&vma->vm_mm->exe_file->f_path, buffer_exe, PAGE_SIZE);
66943+
66944+ pr_info("PAX: %s wants %s%s%s on %s\n", path_lib, msg_mprotect,
66945+ (msg_mprotect[0] && msg_emutramp[0] ? " and " : ""), msg_emutramp, path_exe);
66946+
66947+ }
66948+ free_page((unsigned long)buffer_exe);
66949+ free_page((unsigned long)buffer_lib);
66950+ continue;
66951+ }
66952+#endif
66953+
66954+ }
66955+ }
66956+}
66957+#endif
66958+
66959+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
66960+
66961+extern int grsec_enable_log_rwxmaps;
66962+
66963+static void elf_handle_mmap(struct file *file)
66964+{
66965+ struct elfhdr elf_h;
66966+ struct elf_phdr elf_p;
66967+ unsigned long i;
66968+
66969+ if (!grsec_enable_log_rwxmaps)
66970+ return;
66971+
66972+ if (sizeof(elf_h) != kernel_read(file, 0UL, (char *)&elf_h, sizeof(elf_h)) ||
66973+ memcmp(elf_h.e_ident, ELFMAG, SELFMAG) ||
66974+ (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC) || !elf_check_arch(&elf_h) ||
66975+ elf_h.e_phentsize != sizeof(struct elf_phdr) ||
66976+ elf_h.e_phnum > 65536UL / sizeof(struct elf_phdr))
66977+ return;
66978+
66979+ for (i = 0UL; i < elf_h.e_phnum; i++) {
66980+ if (sizeof(elf_p) != kernel_read(file, elf_h.e_phoff + i*sizeof(elf_p), (char *)&elf_p, sizeof(elf_p)))
66981+ return;
66982+ if (elf_p.p_type == PT_GNU_STACK && (elf_p.p_flags & PF_X))
66983+ gr_log_ptgnustack(file);
66984+ }
66985+}
66986+#endif
66987+
66988 static int __init init_elf_binfmt(void)
66989 {
66990 register_binfmt(&elf_format);
66991diff --git a/fs/block_dev.c b/fs/block_dev.c
66992index 975266b..c3d1856 100644
66993--- a/fs/block_dev.c
66994+++ b/fs/block_dev.c
66995@@ -734,7 +734,7 @@ static bool bd_may_claim(struct block_device *bdev, struct block_device *whole,
66996 else if (bdev->bd_contains == bdev)
66997 return true; /* is a whole device which isn't held */
66998
66999- else if (whole->bd_holder == bd_may_claim)
67000+ else if (whole->bd_holder == (void *)bd_may_claim)
67001 return true; /* is a partition of a device that is being partitioned */
67002 else if (whole->bd_holder != NULL)
67003 return false; /* is a partition of a held device */
67004diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
67005index 6d67f32..8f33187 100644
67006--- a/fs/btrfs/ctree.c
67007+++ b/fs/btrfs/ctree.c
67008@@ -1181,9 +1181,12 @@ static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
67009 free_extent_buffer(buf);
67010 add_root_to_dirty_list(root);
67011 } else {
67012- if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
67013- parent_start = parent->start;
67014- else
67015+ if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) {
67016+ if (parent)
67017+ parent_start = parent->start;
67018+ else
67019+ parent_start = 0;
67020+ } else
67021 parent_start = 0;
67022
67023 WARN_ON(trans->transid != btrfs_header_generation(parent));
67024diff --git a/fs/btrfs/delayed-inode.c b/fs/btrfs/delayed-inode.c
67025index 82f0c7c..dff78a8 100644
67026--- a/fs/btrfs/delayed-inode.c
67027+++ b/fs/btrfs/delayed-inode.c
67028@@ -462,7 +462,7 @@ static int __btrfs_add_delayed_deletion_item(struct btrfs_delayed_node *node,
67029
67030 static void finish_one_item(struct btrfs_delayed_root *delayed_root)
67031 {
67032- int seq = atomic_inc_return(&delayed_root->items_seq);
67033+ int seq = atomic_inc_return_unchecked(&delayed_root->items_seq);
67034 if ((atomic_dec_return(&delayed_root->items) <
67035 BTRFS_DELAYED_BACKGROUND || seq % BTRFS_DELAYED_BATCH == 0) &&
67036 waitqueue_active(&delayed_root->wait))
67037@@ -1412,7 +1412,7 @@ void btrfs_assert_delayed_root_empty(struct btrfs_root *root)
67038
67039 static int could_end_wait(struct btrfs_delayed_root *delayed_root, int seq)
67040 {
67041- int val = atomic_read(&delayed_root->items_seq);
67042+ int val = atomic_read_unchecked(&delayed_root->items_seq);
67043
67044 if (val < seq || val >= seq + BTRFS_DELAYED_BATCH)
67045 return 1;
67046@@ -1436,7 +1436,7 @@ void btrfs_balance_delayed_items(struct btrfs_root *root)
67047 int seq;
67048 int ret;
67049
67050- seq = atomic_read(&delayed_root->items_seq);
67051+ seq = atomic_read_unchecked(&delayed_root->items_seq);
67052
67053 ret = btrfs_wq_run_delayed_node(delayed_root, root, 0);
67054 if (ret)
67055diff --git a/fs/btrfs/delayed-inode.h b/fs/btrfs/delayed-inode.h
67056index f70119f..ab5894d 100644
67057--- a/fs/btrfs/delayed-inode.h
67058+++ b/fs/btrfs/delayed-inode.h
67059@@ -43,7 +43,7 @@ struct btrfs_delayed_root {
67060 */
67061 struct list_head prepare_list;
67062 atomic_t items; /* for delayed items */
67063- atomic_t items_seq; /* for delayed items */
67064+ atomic_unchecked_t items_seq; /* for delayed items */
67065 int nodes; /* for delayed nodes */
67066 wait_queue_head_t wait;
67067 };
67068@@ -90,7 +90,7 @@ static inline void btrfs_init_delayed_root(
67069 struct btrfs_delayed_root *delayed_root)
67070 {
67071 atomic_set(&delayed_root->items, 0);
67072- atomic_set(&delayed_root->items_seq, 0);
67073+ atomic_set_unchecked(&delayed_root->items_seq, 0);
67074 delayed_root->nodes = 0;
67075 spin_lock_init(&delayed_root->lock);
67076 init_waitqueue_head(&delayed_root->wait);
67077diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
67078index 05fef19..f3774b8 100644
67079--- a/fs/btrfs/super.c
67080+++ b/fs/btrfs/super.c
67081@@ -271,7 +271,7 @@ void __btrfs_abort_transaction(struct btrfs_trans_handle *trans,
67082 function, line, errstr);
67083 return;
67084 }
67085- ACCESS_ONCE(trans->transaction->aborted) = errno;
67086+ ACCESS_ONCE_RW(trans->transaction->aborted) = errno;
67087 /* Wake up anybody who may be waiting on this transaction */
67088 wake_up(&root->fs_info->transaction_wait);
67089 wake_up(&root->fs_info->transaction_blocked_wait);
67090diff --git a/fs/btrfs/sysfs.c b/fs/btrfs/sysfs.c
67091index 94edb0a..e94dc93 100644
67092--- a/fs/btrfs/sysfs.c
67093+++ b/fs/btrfs/sysfs.c
67094@@ -472,7 +472,7 @@ static int addrm_unknown_feature_attrs(struct btrfs_fs_info *fs_info, bool add)
67095 for (set = 0; set < FEAT_MAX; set++) {
67096 int i;
67097 struct attribute *attrs[2];
67098- struct attribute_group agroup = {
67099+ attribute_group_no_const agroup = {
67100 .name = "features",
67101 .attrs = attrs,
67102 };
67103diff --git a/fs/btrfs/tests/free-space-tests.c b/fs/btrfs/tests/free-space-tests.c
67104index 2299bfd..4098e72 100644
67105--- a/fs/btrfs/tests/free-space-tests.c
67106+++ b/fs/btrfs/tests/free-space-tests.c
67107@@ -463,7 +463,9 @@ test_steal_space_from_bitmap_to_extent(struct btrfs_block_group_cache *cache)
67108 * extent entry.
67109 */
67110 use_bitmap_op = cache->free_space_ctl->op->use_bitmap;
67111- cache->free_space_ctl->op->use_bitmap = test_use_bitmap;
67112+ pax_open_kernel();
67113+ *(void **)&cache->free_space_ctl->op->use_bitmap = test_use_bitmap;
67114+ pax_close_kernel();
67115
67116 /*
67117 * Extent entry covering free space range [128Mb - 256Kb, 128Mb - 128Kb[
67118@@ -870,7 +872,9 @@ test_steal_space_from_bitmap_to_extent(struct btrfs_block_group_cache *cache)
67119 if (ret)
67120 return ret;
67121
67122- cache->free_space_ctl->op->use_bitmap = use_bitmap_op;
67123+ pax_open_kernel();
67124+ *(void **)&cache->free_space_ctl->op->use_bitmap = use_bitmap_op;
67125+ pax_close_kernel();
67126 __btrfs_remove_free_space_cache(cache->free_space_ctl);
67127
67128 return 0;
67129diff --git a/fs/btrfs/tree-log.h b/fs/btrfs/tree-log.h
67130index 154990c..d0cf699 100644
67131--- a/fs/btrfs/tree-log.h
67132+++ b/fs/btrfs/tree-log.h
67133@@ -43,7 +43,7 @@ static inline void btrfs_init_log_ctx(struct btrfs_log_ctx *ctx)
67134 static inline void btrfs_set_log_full_commit(struct btrfs_fs_info *fs_info,
67135 struct btrfs_trans_handle *trans)
67136 {
67137- ACCESS_ONCE(fs_info->last_trans_log_full_commit) = trans->transid;
67138+ ACCESS_ONCE_RW(fs_info->last_trans_log_full_commit) = trans->transid;
67139 }
67140
67141 static inline int btrfs_need_log_full_commit(struct btrfs_fs_info *fs_info,
67142diff --git a/fs/buffer.c b/fs/buffer.c
67143index 20805db..2e8fc69 100644
67144--- a/fs/buffer.c
67145+++ b/fs/buffer.c
67146@@ -3417,7 +3417,7 @@ void __init buffer_init(void)
67147 bh_cachep = kmem_cache_create("buffer_head",
67148 sizeof(struct buffer_head), 0,
67149 (SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|
67150- SLAB_MEM_SPREAD),
67151+ SLAB_MEM_SPREAD|SLAB_NO_SANITIZE),
67152 NULL);
67153
67154 /*
67155diff --git a/fs/cachefiles/bind.c b/fs/cachefiles/bind.c
67156index fbb08e9..0fda764 100644
67157--- a/fs/cachefiles/bind.c
67158+++ b/fs/cachefiles/bind.c
67159@@ -39,13 +39,11 @@ int cachefiles_daemon_bind(struct cachefiles_cache *cache, char *args)
67160 args);
67161
67162 /* start by checking things over */
67163- ASSERT(cache->fstop_percent >= 0 &&
67164- cache->fstop_percent < cache->fcull_percent &&
67165+ ASSERT(cache->fstop_percent < cache->fcull_percent &&
67166 cache->fcull_percent < cache->frun_percent &&
67167 cache->frun_percent < 100);
67168
67169- ASSERT(cache->bstop_percent >= 0 &&
67170- cache->bstop_percent < cache->bcull_percent &&
67171+ ASSERT(cache->bstop_percent < cache->bcull_percent &&
67172 cache->bcull_percent < cache->brun_percent &&
67173 cache->brun_percent < 100);
67174
67175diff --git a/fs/cachefiles/daemon.c b/fs/cachefiles/daemon.c
67176index f601def..b2cf704 100644
67177--- a/fs/cachefiles/daemon.c
67178+++ b/fs/cachefiles/daemon.c
67179@@ -196,7 +196,7 @@ static ssize_t cachefiles_daemon_read(struct file *file, char __user *_buffer,
67180 if (n > buflen)
67181 return -EMSGSIZE;
67182
67183- if (copy_to_user(_buffer, buffer, n) != 0)
67184+ if (n > sizeof(buffer) || copy_to_user(_buffer, buffer, n) != 0)
67185 return -EFAULT;
67186
67187 return n;
67188@@ -222,7 +222,7 @@ static ssize_t cachefiles_daemon_write(struct file *file,
67189 if (test_bit(CACHEFILES_DEAD, &cache->flags))
67190 return -EIO;
67191
67192- if (datalen < 0 || datalen > PAGE_SIZE - 1)
67193+ if (datalen > PAGE_SIZE - 1)
67194 return -EOPNOTSUPP;
67195
67196 /* drag the command string into the kernel so we can parse it */
67197@@ -385,7 +385,7 @@ static int cachefiles_daemon_fstop(struct cachefiles_cache *cache, char *args)
67198 if (args[0] != '%' || args[1] != '\0')
67199 return -EINVAL;
67200
67201- if (fstop < 0 || fstop >= cache->fcull_percent)
67202+ if (fstop >= cache->fcull_percent)
67203 return cachefiles_daemon_range_error(cache, args);
67204
67205 cache->fstop_percent = fstop;
67206@@ -457,7 +457,7 @@ static int cachefiles_daemon_bstop(struct cachefiles_cache *cache, char *args)
67207 if (args[0] != '%' || args[1] != '\0')
67208 return -EINVAL;
67209
67210- if (bstop < 0 || bstop >= cache->bcull_percent)
67211+ if (bstop >= cache->bcull_percent)
67212 return cachefiles_daemon_range_error(cache, args);
67213
67214 cache->bstop_percent = bstop;
67215diff --git a/fs/cachefiles/internal.h b/fs/cachefiles/internal.h
67216index 8c52472..c4e3a69 100644
67217--- a/fs/cachefiles/internal.h
67218+++ b/fs/cachefiles/internal.h
67219@@ -66,7 +66,7 @@ struct cachefiles_cache {
67220 wait_queue_head_t daemon_pollwq; /* poll waitqueue for daemon */
67221 struct rb_root active_nodes; /* active nodes (can't be culled) */
67222 rwlock_t active_lock; /* lock for active_nodes */
67223- atomic_t gravecounter; /* graveyard uniquifier */
67224+ atomic_unchecked_t gravecounter; /* graveyard uniquifier */
67225 unsigned frun_percent; /* when to stop culling (% files) */
67226 unsigned fcull_percent; /* when to start culling (% files) */
67227 unsigned fstop_percent; /* when to stop allocating (% files) */
67228@@ -178,19 +178,19 @@ extern int cachefiles_check_in_use(struct cachefiles_cache *cache,
67229 * proc.c
67230 */
67231 #ifdef CONFIG_CACHEFILES_HISTOGRAM
67232-extern atomic_t cachefiles_lookup_histogram[HZ];
67233-extern atomic_t cachefiles_mkdir_histogram[HZ];
67234-extern atomic_t cachefiles_create_histogram[HZ];
67235+extern atomic_unchecked_t cachefiles_lookup_histogram[HZ];
67236+extern atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
67237+extern atomic_unchecked_t cachefiles_create_histogram[HZ];
67238
67239 extern int __init cachefiles_proc_init(void);
67240 extern void cachefiles_proc_cleanup(void);
67241 static inline
67242-void cachefiles_hist(atomic_t histogram[], unsigned long start_jif)
67243+void cachefiles_hist(atomic_unchecked_t histogram[], unsigned long start_jif)
67244 {
67245 unsigned long jif = jiffies - start_jif;
67246 if (jif >= HZ)
67247 jif = HZ - 1;
67248- atomic_inc(&histogram[jif]);
67249+ atomic_inc_unchecked(&histogram[jif]);
67250 }
67251
67252 #else
67253diff --git a/fs/cachefiles/namei.c b/fs/cachefiles/namei.c
67254index 1e51714..411eded 100644
67255--- a/fs/cachefiles/namei.c
67256+++ b/fs/cachefiles/namei.c
67257@@ -309,7 +309,7 @@ try_again:
67258 /* first step is to make up a grave dentry in the graveyard */
67259 sprintf(nbuffer, "%08x%08x",
67260 (uint32_t) get_seconds(),
67261- (uint32_t) atomic_inc_return(&cache->gravecounter));
67262+ (uint32_t) atomic_inc_return_unchecked(&cache->gravecounter));
67263
67264 /* do the multiway lock magic */
67265 trap = lock_rename(cache->graveyard, dir);
67266diff --git a/fs/cachefiles/proc.c b/fs/cachefiles/proc.c
67267index eccd339..4c1d995 100644
67268--- a/fs/cachefiles/proc.c
67269+++ b/fs/cachefiles/proc.c
67270@@ -14,9 +14,9 @@
67271 #include <linux/seq_file.h>
67272 #include "internal.h"
67273
67274-atomic_t cachefiles_lookup_histogram[HZ];
67275-atomic_t cachefiles_mkdir_histogram[HZ];
67276-atomic_t cachefiles_create_histogram[HZ];
67277+atomic_unchecked_t cachefiles_lookup_histogram[HZ];
67278+atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
67279+atomic_unchecked_t cachefiles_create_histogram[HZ];
67280
67281 /*
67282 * display the latency histogram
67283@@ -35,9 +35,9 @@ static int cachefiles_histogram_show(struct seq_file *m, void *v)
67284 return 0;
67285 default:
67286 index = (unsigned long) v - 3;
67287- x = atomic_read(&cachefiles_lookup_histogram[index]);
67288- y = atomic_read(&cachefiles_mkdir_histogram[index]);
67289- z = atomic_read(&cachefiles_create_histogram[index]);
67290+ x = atomic_read_unchecked(&cachefiles_lookup_histogram[index]);
67291+ y = atomic_read_unchecked(&cachefiles_mkdir_histogram[index]);
67292+ z = atomic_read_unchecked(&cachefiles_create_histogram[index]);
67293 if (x == 0 && y == 0 && z == 0)
67294 return 0;
67295
67296diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c
67297index 83e9976..bfd1eee 100644
67298--- a/fs/ceph/dir.c
67299+++ b/fs/ceph/dir.c
67300@@ -127,6 +127,8 @@ static int __dcache_readdir(struct file *file, struct dir_context *ctx,
67301 struct dentry *dentry, *last;
67302 struct ceph_dentry_info *di;
67303 int err = 0;
67304+ char d_name[DNAME_INLINE_LEN];
67305+ const unsigned char *name;
67306
67307 /* claim ref on last dentry we returned */
67308 last = fi->dentry;
67309@@ -190,7 +192,12 @@ more:
67310
67311 dout(" %llu (%llu) dentry %p %pd %p\n", di->offset, ctx->pos,
67312 dentry, dentry, dentry->d_inode);
67313- if (!dir_emit(ctx, dentry->d_name.name,
67314+ name = dentry->d_name.name;
67315+ if (name == dentry->d_iname) {
67316+ memcpy(d_name, name, dentry->d_name.len);
67317+ name = d_name;
67318+ }
67319+ if (!dir_emit(ctx, name,
67320 dentry->d_name.len,
67321 ceph_translate_ino(dentry->d_sb, dentry->d_inode->i_ino),
67322 dentry->d_inode->i_mode >> 12)) {
67323@@ -248,7 +255,7 @@ static int ceph_readdir(struct file *file, struct dir_context *ctx)
67324 struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
67325 struct ceph_mds_client *mdsc = fsc->mdsc;
67326 unsigned frag = fpos_frag(ctx->pos);
67327- int off = fpos_off(ctx->pos);
67328+ unsigned int off = fpos_off(ctx->pos);
67329 int err;
67330 u32 ftype;
67331 struct ceph_mds_reply_info_parsed *rinfo;
67332diff --git a/fs/ceph/super.c b/fs/ceph/super.c
67333index a63997b..ddc0577 100644
67334--- a/fs/ceph/super.c
67335+++ b/fs/ceph/super.c
67336@@ -889,7 +889,7 @@ static int ceph_compare_super(struct super_block *sb, void *data)
67337 /*
67338 * construct our own bdi so we can control readahead, etc.
67339 */
67340-static atomic_long_t bdi_seq = ATOMIC_LONG_INIT(0);
67341+static atomic_long_unchecked_t bdi_seq = ATOMIC_LONG_INIT(0);
67342
67343 static int ceph_register_bdi(struct super_block *sb,
67344 struct ceph_fs_client *fsc)
67345@@ -906,7 +906,7 @@ static int ceph_register_bdi(struct super_block *sb,
67346 VM_MAX_READAHEAD * 1024 / PAGE_CACHE_SIZE;
67347
67348 err = bdi_register(&fsc->backing_dev_info, NULL, "ceph-%ld",
67349- atomic_long_inc_return(&bdi_seq));
67350+ atomic_long_inc_return_unchecked(&bdi_seq));
67351 if (!err)
67352 sb->s_bdi = &fsc->backing_dev_info;
67353 return err;
67354diff --git a/fs/cifs/cifs_debug.c b/fs/cifs/cifs_debug.c
67355index 7febcf2..62a5721 100644
67356--- a/fs/cifs/cifs_debug.c
67357+++ b/fs/cifs/cifs_debug.c
67358@@ -269,8 +269,8 @@ static ssize_t cifs_stats_proc_write(struct file *file,
67359
67360 if (strtobool(&c, &bv) == 0) {
67361 #ifdef CONFIG_CIFS_STATS2
67362- atomic_set(&totBufAllocCount, 0);
67363- atomic_set(&totSmBufAllocCount, 0);
67364+ atomic_set_unchecked(&totBufAllocCount, 0);
67365+ atomic_set_unchecked(&totSmBufAllocCount, 0);
67366 #endif /* CONFIG_CIFS_STATS2 */
67367 spin_lock(&cifs_tcp_ses_lock);
67368 list_for_each(tmp1, &cifs_tcp_ses_list) {
67369@@ -283,7 +283,7 @@ static ssize_t cifs_stats_proc_write(struct file *file,
67370 tcon = list_entry(tmp3,
67371 struct cifs_tcon,
67372 tcon_list);
67373- atomic_set(&tcon->num_smbs_sent, 0);
67374+ atomic_set_unchecked(&tcon->num_smbs_sent, 0);
67375 if (server->ops->clear_stats)
67376 server->ops->clear_stats(tcon);
67377 }
67378@@ -315,8 +315,8 @@ static int cifs_stats_proc_show(struct seq_file *m, void *v)
67379 smBufAllocCount.counter, cifs_min_small);
67380 #ifdef CONFIG_CIFS_STATS2
67381 seq_printf(m, "Total Large %d Small %d Allocations\n",
67382- atomic_read(&totBufAllocCount),
67383- atomic_read(&totSmBufAllocCount));
67384+ atomic_read_unchecked(&totBufAllocCount),
67385+ atomic_read_unchecked(&totSmBufAllocCount));
67386 #endif /* CONFIG_CIFS_STATS2 */
67387
67388 seq_printf(m, "Operations (MIDs): %d\n", atomic_read(&midCount));
67389@@ -345,7 +345,7 @@ static int cifs_stats_proc_show(struct seq_file *m, void *v)
67390 if (tcon->need_reconnect)
67391 seq_puts(m, "\tDISCONNECTED ");
67392 seq_printf(m, "\nSMBs: %d",
67393- atomic_read(&tcon->num_smbs_sent));
67394+ atomic_read_unchecked(&tcon->num_smbs_sent));
67395 if (server->ops->print_stats)
67396 server->ops->print_stats(m, tcon);
67397 }
67398diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
67399index d72fe37..ded5511 100644
67400--- a/fs/cifs/cifsfs.c
67401+++ b/fs/cifs/cifsfs.c
67402@@ -1092,7 +1092,7 @@ cifs_init_request_bufs(void)
67403 */
67404 cifs_req_cachep = kmem_cache_create("cifs_request",
67405 CIFSMaxBufSize + max_hdr_size, 0,
67406- SLAB_HWCACHE_ALIGN, NULL);
67407+ SLAB_HWCACHE_ALIGN | SLAB_USERCOPY, NULL);
67408 if (cifs_req_cachep == NULL)
67409 return -ENOMEM;
67410
67411@@ -1119,7 +1119,7 @@ cifs_init_request_bufs(void)
67412 efficient to alloc 1 per page off the slab compared to 17K (5page)
67413 alloc of large cifs buffers even when page debugging is on */
67414 cifs_sm_req_cachep = kmem_cache_create("cifs_small_rq",
67415- MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN,
67416+ MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN | SLAB_USERCOPY,
67417 NULL);
67418 if (cifs_sm_req_cachep == NULL) {
67419 mempool_destroy(cifs_req_poolp);
67420@@ -1204,8 +1204,8 @@ init_cifs(void)
67421 atomic_set(&bufAllocCount, 0);
67422 atomic_set(&smBufAllocCount, 0);
67423 #ifdef CONFIG_CIFS_STATS2
67424- atomic_set(&totBufAllocCount, 0);
67425- atomic_set(&totSmBufAllocCount, 0);
67426+ atomic_set_unchecked(&totBufAllocCount, 0);
67427+ atomic_set_unchecked(&totSmBufAllocCount, 0);
67428 #endif /* CONFIG_CIFS_STATS2 */
67429
67430 atomic_set(&midCount, 0);
67431diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
67432index 22b289a..bbbba08 100644
67433--- a/fs/cifs/cifsglob.h
67434+++ b/fs/cifs/cifsglob.h
67435@@ -823,35 +823,35 @@ struct cifs_tcon {
67436 __u16 Flags; /* optional support bits */
67437 enum statusEnum tidStatus;
67438 #ifdef CONFIG_CIFS_STATS
67439- atomic_t num_smbs_sent;
67440+ atomic_unchecked_t num_smbs_sent;
67441 union {
67442 struct {
67443- atomic_t num_writes;
67444- atomic_t num_reads;
67445- atomic_t num_flushes;
67446- atomic_t num_oplock_brks;
67447- atomic_t num_opens;
67448- atomic_t num_closes;
67449- atomic_t num_deletes;
67450- atomic_t num_mkdirs;
67451- atomic_t num_posixopens;
67452- atomic_t num_posixmkdirs;
67453- atomic_t num_rmdirs;
67454- atomic_t num_renames;
67455- atomic_t num_t2renames;
67456- atomic_t num_ffirst;
67457- atomic_t num_fnext;
67458- atomic_t num_fclose;
67459- atomic_t num_hardlinks;
67460- atomic_t num_symlinks;
67461- atomic_t num_locks;
67462- atomic_t num_acl_get;
67463- atomic_t num_acl_set;
67464+ atomic_unchecked_t num_writes;
67465+ atomic_unchecked_t num_reads;
67466+ atomic_unchecked_t num_flushes;
67467+ atomic_unchecked_t num_oplock_brks;
67468+ atomic_unchecked_t num_opens;
67469+ atomic_unchecked_t num_closes;
67470+ atomic_unchecked_t num_deletes;
67471+ atomic_unchecked_t num_mkdirs;
67472+ atomic_unchecked_t num_posixopens;
67473+ atomic_unchecked_t num_posixmkdirs;
67474+ atomic_unchecked_t num_rmdirs;
67475+ atomic_unchecked_t num_renames;
67476+ atomic_unchecked_t num_t2renames;
67477+ atomic_unchecked_t num_ffirst;
67478+ atomic_unchecked_t num_fnext;
67479+ atomic_unchecked_t num_fclose;
67480+ atomic_unchecked_t num_hardlinks;
67481+ atomic_unchecked_t num_symlinks;
67482+ atomic_unchecked_t num_locks;
67483+ atomic_unchecked_t num_acl_get;
67484+ atomic_unchecked_t num_acl_set;
67485 } cifs_stats;
67486 #ifdef CONFIG_CIFS_SMB2
67487 struct {
67488- atomic_t smb2_com_sent[NUMBER_OF_SMB2_COMMANDS];
67489- atomic_t smb2_com_failed[NUMBER_OF_SMB2_COMMANDS];
67490+ atomic_unchecked_t smb2_com_sent[NUMBER_OF_SMB2_COMMANDS];
67491+ atomic_unchecked_t smb2_com_failed[NUMBER_OF_SMB2_COMMANDS];
67492 } smb2_stats;
67493 #endif /* CONFIG_CIFS_SMB2 */
67494 } stats;
67495@@ -1198,7 +1198,7 @@ convert_delimiter(char *path, char delim)
67496 }
67497
67498 #ifdef CONFIG_CIFS_STATS
67499-#define cifs_stats_inc atomic_inc
67500+#define cifs_stats_inc atomic_inc_unchecked
67501
67502 static inline void cifs_stats_bytes_written(struct cifs_tcon *tcon,
67503 unsigned int bytes)
67504@@ -1565,8 +1565,8 @@ GLOBAL_EXTERN atomic_t tconInfoReconnectCount;
67505 /* Various Debug counters */
67506 GLOBAL_EXTERN atomic_t bufAllocCount; /* current number allocated */
67507 #ifdef CONFIG_CIFS_STATS2
67508-GLOBAL_EXTERN atomic_t totBufAllocCount; /* total allocated over all time */
67509-GLOBAL_EXTERN atomic_t totSmBufAllocCount;
67510+GLOBAL_EXTERN atomic_unchecked_t totBufAllocCount; /* total allocated over all time */
67511+GLOBAL_EXTERN atomic_unchecked_t totSmBufAllocCount;
67512 #endif
67513 GLOBAL_EXTERN atomic_t smBufAllocCount;
67514 GLOBAL_EXTERN atomic_t midCount;
67515diff --git a/fs/cifs/file.c b/fs/cifs/file.c
67516index ca30c39..570fb94 100644
67517--- a/fs/cifs/file.c
67518+++ b/fs/cifs/file.c
67519@@ -2055,10 +2055,14 @@ static int cifs_writepages(struct address_space *mapping,
67520 index = mapping->writeback_index; /* Start from prev offset */
67521 end = -1;
67522 } else {
67523- index = wbc->range_start >> PAGE_CACHE_SHIFT;
67524- end = wbc->range_end >> PAGE_CACHE_SHIFT;
67525- if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
67526+ if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX) {
67527 range_whole = true;
67528+ index = 0;
67529+ end = ULONG_MAX;
67530+ } else {
67531+ index = wbc->range_start >> PAGE_CACHE_SHIFT;
67532+ end = wbc->range_end >> PAGE_CACHE_SHIFT;
67533+ }
67534 scanned = true;
67535 }
67536 server = cifs_sb_master_tcon(cifs_sb)->ses->server;
67537diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c
67538index 3379463..3af418a 100644
67539--- a/fs/cifs/misc.c
67540+++ b/fs/cifs/misc.c
67541@@ -170,7 +170,7 @@ cifs_buf_get(void)
67542 memset(ret_buf, 0, buf_size + 3);
67543 atomic_inc(&bufAllocCount);
67544 #ifdef CONFIG_CIFS_STATS2
67545- atomic_inc(&totBufAllocCount);
67546+ atomic_inc_unchecked(&totBufAllocCount);
67547 #endif /* CONFIG_CIFS_STATS2 */
67548 }
67549
67550@@ -205,7 +205,7 @@ cifs_small_buf_get(void)
67551 /* memset(ret_buf, 0, sizeof(struct smb_hdr) + 27);*/
67552 atomic_inc(&smBufAllocCount);
67553 #ifdef CONFIG_CIFS_STATS2
67554- atomic_inc(&totSmBufAllocCount);
67555+ atomic_inc_unchecked(&totSmBufAllocCount);
67556 #endif /* CONFIG_CIFS_STATS2 */
67557
67558 }
67559diff --git a/fs/cifs/smb1ops.c b/fs/cifs/smb1ops.c
67560index d297903..1cb7516 100644
67561--- a/fs/cifs/smb1ops.c
67562+++ b/fs/cifs/smb1ops.c
67563@@ -622,27 +622,27 @@ static void
67564 cifs_clear_stats(struct cifs_tcon *tcon)
67565 {
67566 #ifdef CONFIG_CIFS_STATS
67567- atomic_set(&tcon->stats.cifs_stats.num_writes, 0);
67568- atomic_set(&tcon->stats.cifs_stats.num_reads, 0);
67569- atomic_set(&tcon->stats.cifs_stats.num_flushes, 0);
67570- atomic_set(&tcon->stats.cifs_stats.num_oplock_brks, 0);
67571- atomic_set(&tcon->stats.cifs_stats.num_opens, 0);
67572- atomic_set(&tcon->stats.cifs_stats.num_posixopens, 0);
67573- atomic_set(&tcon->stats.cifs_stats.num_posixmkdirs, 0);
67574- atomic_set(&tcon->stats.cifs_stats.num_closes, 0);
67575- atomic_set(&tcon->stats.cifs_stats.num_deletes, 0);
67576- atomic_set(&tcon->stats.cifs_stats.num_mkdirs, 0);
67577- atomic_set(&tcon->stats.cifs_stats.num_rmdirs, 0);
67578- atomic_set(&tcon->stats.cifs_stats.num_renames, 0);
67579- atomic_set(&tcon->stats.cifs_stats.num_t2renames, 0);
67580- atomic_set(&tcon->stats.cifs_stats.num_ffirst, 0);
67581- atomic_set(&tcon->stats.cifs_stats.num_fnext, 0);
67582- atomic_set(&tcon->stats.cifs_stats.num_fclose, 0);
67583- atomic_set(&tcon->stats.cifs_stats.num_hardlinks, 0);
67584- atomic_set(&tcon->stats.cifs_stats.num_symlinks, 0);
67585- atomic_set(&tcon->stats.cifs_stats.num_locks, 0);
67586- atomic_set(&tcon->stats.cifs_stats.num_acl_get, 0);
67587- atomic_set(&tcon->stats.cifs_stats.num_acl_set, 0);
67588+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_writes, 0);
67589+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_reads, 0);
67590+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_flushes, 0);
67591+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_oplock_brks, 0);
67592+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_opens, 0);
67593+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_posixopens, 0);
67594+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_posixmkdirs, 0);
67595+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_closes, 0);
67596+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_deletes, 0);
67597+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_mkdirs, 0);
67598+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_rmdirs, 0);
67599+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_renames, 0);
67600+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_t2renames, 0);
67601+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_ffirst, 0);
67602+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_fnext, 0);
67603+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_fclose, 0);
67604+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_hardlinks, 0);
67605+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_symlinks, 0);
67606+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_locks, 0);
67607+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_acl_get, 0);
67608+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_acl_set, 0);
67609 #endif
67610 }
67611
67612@@ -651,36 +651,36 @@ cifs_print_stats(struct seq_file *m, struct cifs_tcon *tcon)
67613 {
67614 #ifdef CONFIG_CIFS_STATS
67615 seq_printf(m, " Oplocks breaks: %d",
67616- atomic_read(&tcon->stats.cifs_stats.num_oplock_brks));
67617+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_oplock_brks));
67618 seq_printf(m, "\nReads: %d Bytes: %llu",
67619- atomic_read(&tcon->stats.cifs_stats.num_reads),
67620+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_reads),
67621 (long long)(tcon->bytes_read));
67622 seq_printf(m, "\nWrites: %d Bytes: %llu",
67623- atomic_read(&tcon->stats.cifs_stats.num_writes),
67624+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_writes),
67625 (long long)(tcon->bytes_written));
67626 seq_printf(m, "\nFlushes: %d",
67627- atomic_read(&tcon->stats.cifs_stats.num_flushes));
67628+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_flushes));
67629 seq_printf(m, "\nLocks: %d HardLinks: %d Symlinks: %d",
67630- atomic_read(&tcon->stats.cifs_stats.num_locks),
67631- atomic_read(&tcon->stats.cifs_stats.num_hardlinks),
67632- atomic_read(&tcon->stats.cifs_stats.num_symlinks));
67633+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_locks),
67634+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_hardlinks),
67635+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_symlinks));
67636 seq_printf(m, "\nOpens: %d Closes: %d Deletes: %d",
67637- atomic_read(&tcon->stats.cifs_stats.num_opens),
67638- atomic_read(&tcon->stats.cifs_stats.num_closes),
67639- atomic_read(&tcon->stats.cifs_stats.num_deletes));
67640+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_opens),
67641+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_closes),
67642+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_deletes));
67643 seq_printf(m, "\nPosix Opens: %d Posix Mkdirs: %d",
67644- atomic_read(&tcon->stats.cifs_stats.num_posixopens),
67645- atomic_read(&tcon->stats.cifs_stats.num_posixmkdirs));
67646+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_posixopens),
67647+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_posixmkdirs));
67648 seq_printf(m, "\nMkdirs: %d Rmdirs: %d",
67649- atomic_read(&tcon->stats.cifs_stats.num_mkdirs),
67650- atomic_read(&tcon->stats.cifs_stats.num_rmdirs));
67651+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_mkdirs),
67652+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_rmdirs));
67653 seq_printf(m, "\nRenames: %d T2 Renames %d",
67654- atomic_read(&tcon->stats.cifs_stats.num_renames),
67655- atomic_read(&tcon->stats.cifs_stats.num_t2renames));
67656+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_renames),
67657+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_t2renames));
67658 seq_printf(m, "\nFindFirst: %d FNext %d FClose %d",
67659- atomic_read(&tcon->stats.cifs_stats.num_ffirst),
67660- atomic_read(&tcon->stats.cifs_stats.num_fnext),
67661- atomic_read(&tcon->stats.cifs_stats.num_fclose));
67662+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_ffirst),
67663+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_fnext),
67664+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_fclose));
67665 #endif
67666 }
67667
67668diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
67669index eab05e1..ffe5ea4 100644
67670--- a/fs/cifs/smb2ops.c
67671+++ b/fs/cifs/smb2ops.c
67672@@ -418,8 +418,8 @@ smb2_clear_stats(struct cifs_tcon *tcon)
67673 #ifdef CONFIG_CIFS_STATS
67674 int i;
67675 for (i = 0; i < NUMBER_OF_SMB2_COMMANDS; i++) {
67676- atomic_set(&tcon->stats.smb2_stats.smb2_com_sent[i], 0);
67677- atomic_set(&tcon->stats.smb2_stats.smb2_com_failed[i], 0);
67678+ atomic_set_unchecked(&tcon->stats.smb2_stats.smb2_com_sent[i], 0);
67679+ atomic_set_unchecked(&tcon->stats.smb2_stats.smb2_com_failed[i], 0);
67680 }
67681 #endif
67682 }
67683@@ -459,65 +459,65 @@ static void
67684 smb2_print_stats(struct seq_file *m, struct cifs_tcon *tcon)
67685 {
67686 #ifdef CONFIG_CIFS_STATS
67687- atomic_t *sent = tcon->stats.smb2_stats.smb2_com_sent;
67688- atomic_t *failed = tcon->stats.smb2_stats.smb2_com_failed;
67689+ atomic_unchecked_t *sent = tcon->stats.smb2_stats.smb2_com_sent;
67690+ atomic_unchecked_t *failed = tcon->stats.smb2_stats.smb2_com_failed;
67691 seq_printf(m, "\nNegotiates: %d sent %d failed",
67692- atomic_read(&sent[SMB2_NEGOTIATE_HE]),
67693- atomic_read(&failed[SMB2_NEGOTIATE_HE]));
67694+ atomic_read_unchecked(&sent[SMB2_NEGOTIATE_HE]),
67695+ atomic_read_unchecked(&failed[SMB2_NEGOTIATE_HE]));
67696 seq_printf(m, "\nSessionSetups: %d sent %d failed",
67697- atomic_read(&sent[SMB2_SESSION_SETUP_HE]),
67698- atomic_read(&failed[SMB2_SESSION_SETUP_HE]));
67699+ atomic_read_unchecked(&sent[SMB2_SESSION_SETUP_HE]),
67700+ atomic_read_unchecked(&failed[SMB2_SESSION_SETUP_HE]));
67701 seq_printf(m, "\nLogoffs: %d sent %d failed",
67702- atomic_read(&sent[SMB2_LOGOFF_HE]),
67703- atomic_read(&failed[SMB2_LOGOFF_HE]));
67704+ atomic_read_unchecked(&sent[SMB2_LOGOFF_HE]),
67705+ atomic_read_unchecked(&failed[SMB2_LOGOFF_HE]));
67706 seq_printf(m, "\nTreeConnects: %d sent %d failed",
67707- atomic_read(&sent[SMB2_TREE_CONNECT_HE]),
67708- atomic_read(&failed[SMB2_TREE_CONNECT_HE]));
67709+ atomic_read_unchecked(&sent[SMB2_TREE_CONNECT_HE]),
67710+ atomic_read_unchecked(&failed[SMB2_TREE_CONNECT_HE]));
67711 seq_printf(m, "\nTreeDisconnects: %d sent %d failed",
67712- atomic_read(&sent[SMB2_TREE_DISCONNECT_HE]),
67713- atomic_read(&failed[SMB2_TREE_DISCONNECT_HE]));
67714+ atomic_read_unchecked(&sent[SMB2_TREE_DISCONNECT_HE]),
67715+ atomic_read_unchecked(&failed[SMB2_TREE_DISCONNECT_HE]));
67716 seq_printf(m, "\nCreates: %d sent %d failed",
67717- atomic_read(&sent[SMB2_CREATE_HE]),
67718- atomic_read(&failed[SMB2_CREATE_HE]));
67719+ atomic_read_unchecked(&sent[SMB2_CREATE_HE]),
67720+ atomic_read_unchecked(&failed[SMB2_CREATE_HE]));
67721 seq_printf(m, "\nCloses: %d sent %d failed",
67722- atomic_read(&sent[SMB2_CLOSE_HE]),
67723- atomic_read(&failed[SMB2_CLOSE_HE]));
67724+ atomic_read_unchecked(&sent[SMB2_CLOSE_HE]),
67725+ atomic_read_unchecked(&failed[SMB2_CLOSE_HE]));
67726 seq_printf(m, "\nFlushes: %d sent %d failed",
67727- atomic_read(&sent[SMB2_FLUSH_HE]),
67728- atomic_read(&failed[SMB2_FLUSH_HE]));
67729+ atomic_read_unchecked(&sent[SMB2_FLUSH_HE]),
67730+ atomic_read_unchecked(&failed[SMB2_FLUSH_HE]));
67731 seq_printf(m, "\nReads: %d sent %d failed",
67732- atomic_read(&sent[SMB2_READ_HE]),
67733- atomic_read(&failed[SMB2_READ_HE]));
67734+ atomic_read_unchecked(&sent[SMB2_READ_HE]),
67735+ atomic_read_unchecked(&failed[SMB2_READ_HE]));
67736 seq_printf(m, "\nWrites: %d sent %d failed",
67737- atomic_read(&sent[SMB2_WRITE_HE]),
67738- atomic_read(&failed[SMB2_WRITE_HE]));
67739+ atomic_read_unchecked(&sent[SMB2_WRITE_HE]),
67740+ atomic_read_unchecked(&failed[SMB2_WRITE_HE]));
67741 seq_printf(m, "\nLocks: %d sent %d failed",
67742- atomic_read(&sent[SMB2_LOCK_HE]),
67743- atomic_read(&failed[SMB2_LOCK_HE]));
67744+ atomic_read_unchecked(&sent[SMB2_LOCK_HE]),
67745+ atomic_read_unchecked(&failed[SMB2_LOCK_HE]));
67746 seq_printf(m, "\nIOCTLs: %d sent %d failed",
67747- atomic_read(&sent[SMB2_IOCTL_HE]),
67748- atomic_read(&failed[SMB2_IOCTL_HE]));
67749+ atomic_read_unchecked(&sent[SMB2_IOCTL_HE]),
67750+ atomic_read_unchecked(&failed[SMB2_IOCTL_HE]));
67751 seq_printf(m, "\nCancels: %d sent %d failed",
67752- atomic_read(&sent[SMB2_CANCEL_HE]),
67753- atomic_read(&failed[SMB2_CANCEL_HE]));
67754+ atomic_read_unchecked(&sent[SMB2_CANCEL_HE]),
67755+ atomic_read_unchecked(&failed[SMB2_CANCEL_HE]));
67756 seq_printf(m, "\nEchos: %d sent %d failed",
67757- atomic_read(&sent[SMB2_ECHO_HE]),
67758- atomic_read(&failed[SMB2_ECHO_HE]));
67759+ atomic_read_unchecked(&sent[SMB2_ECHO_HE]),
67760+ atomic_read_unchecked(&failed[SMB2_ECHO_HE]));
67761 seq_printf(m, "\nQueryDirectories: %d sent %d failed",
67762- atomic_read(&sent[SMB2_QUERY_DIRECTORY_HE]),
67763- atomic_read(&failed[SMB2_QUERY_DIRECTORY_HE]));
67764+ atomic_read_unchecked(&sent[SMB2_QUERY_DIRECTORY_HE]),
67765+ atomic_read_unchecked(&failed[SMB2_QUERY_DIRECTORY_HE]));
67766 seq_printf(m, "\nChangeNotifies: %d sent %d failed",
67767- atomic_read(&sent[SMB2_CHANGE_NOTIFY_HE]),
67768- atomic_read(&failed[SMB2_CHANGE_NOTIFY_HE]));
67769+ atomic_read_unchecked(&sent[SMB2_CHANGE_NOTIFY_HE]),
67770+ atomic_read_unchecked(&failed[SMB2_CHANGE_NOTIFY_HE]));
67771 seq_printf(m, "\nQueryInfos: %d sent %d failed",
67772- atomic_read(&sent[SMB2_QUERY_INFO_HE]),
67773- atomic_read(&failed[SMB2_QUERY_INFO_HE]));
67774+ atomic_read_unchecked(&sent[SMB2_QUERY_INFO_HE]),
67775+ atomic_read_unchecked(&failed[SMB2_QUERY_INFO_HE]));
67776 seq_printf(m, "\nSetInfos: %d sent %d failed",
67777- atomic_read(&sent[SMB2_SET_INFO_HE]),
67778- atomic_read(&failed[SMB2_SET_INFO_HE]));
67779+ atomic_read_unchecked(&sent[SMB2_SET_INFO_HE]),
67780+ atomic_read_unchecked(&failed[SMB2_SET_INFO_HE]));
67781 seq_printf(m, "\nOplockBreaks: %d sent %d failed",
67782- atomic_read(&sent[SMB2_OPLOCK_BREAK_HE]),
67783- atomic_read(&failed[SMB2_OPLOCK_BREAK_HE]));
67784+ atomic_read_unchecked(&sent[SMB2_OPLOCK_BREAK_HE]),
67785+ atomic_read_unchecked(&failed[SMB2_OPLOCK_BREAK_HE]));
67786 #endif
67787 }
67788
67789diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
67790index 65cd7a8..3518676 100644
67791--- a/fs/cifs/smb2pdu.c
67792+++ b/fs/cifs/smb2pdu.c
67793@@ -2147,8 +2147,7 @@ SMB2_query_directory(const unsigned int xid, struct cifs_tcon *tcon,
67794 default:
67795 cifs_dbg(VFS, "info level %u isn't supported\n",
67796 srch_inf->info_level);
67797- rc = -EINVAL;
67798- goto qdir_exit;
67799+ return -EINVAL;
67800 }
67801
67802 req->FileIndex = cpu_to_le32(index);
67803diff --git a/fs/coda/cache.c b/fs/coda/cache.c
67804index 46ee6f2..89a9e7f 100644
67805--- a/fs/coda/cache.c
67806+++ b/fs/coda/cache.c
67807@@ -24,7 +24,7 @@
67808 #include "coda_linux.h"
67809 #include "coda_cache.h"
67810
67811-static atomic_t permission_epoch = ATOMIC_INIT(0);
67812+static atomic_unchecked_t permission_epoch = ATOMIC_INIT(0);
67813
67814 /* replace or extend an acl cache hit */
67815 void coda_cache_enter(struct inode *inode, int mask)
67816@@ -32,7 +32,7 @@ void coda_cache_enter(struct inode *inode, int mask)
67817 struct coda_inode_info *cii = ITOC(inode);
67818
67819 spin_lock(&cii->c_lock);
67820- cii->c_cached_epoch = atomic_read(&permission_epoch);
67821+ cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch);
67822 if (!uid_eq(cii->c_uid, current_fsuid())) {
67823 cii->c_uid = current_fsuid();
67824 cii->c_cached_perm = mask;
67825@@ -46,14 +46,14 @@ void coda_cache_clear_inode(struct inode *inode)
67826 {
67827 struct coda_inode_info *cii = ITOC(inode);
67828 spin_lock(&cii->c_lock);
67829- cii->c_cached_epoch = atomic_read(&permission_epoch) - 1;
67830+ cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch) - 1;
67831 spin_unlock(&cii->c_lock);
67832 }
67833
67834 /* remove all acl caches */
67835 void coda_cache_clear_all(struct super_block *sb)
67836 {
67837- atomic_inc(&permission_epoch);
67838+ atomic_inc_unchecked(&permission_epoch);
67839 }
67840
67841
67842@@ -66,7 +66,7 @@ int coda_cache_check(struct inode *inode, int mask)
67843 spin_lock(&cii->c_lock);
67844 hit = (mask & cii->c_cached_perm) == mask &&
67845 uid_eq(cii->c_uid, current_fsuid()) &&
67846- cii->c_cached_epoch == atomic_read(&permission_epoch);
67847+ cii->c_cached_epoch == atomic_read_unchecked(&permission_epoch);
67848 spin_unlock(&cii->c_lock);
67849
67850 return hit;
67851diff --git a/fs/compat.c b/fs/compat.c
67852index 6fd272d..dd34ba2 100644
67853--- a/fs/compat.c
67854+++ b/fs/compat.c
67855@@ -54,7 +54,7 @@
67856 #include <asm/ioctls.h>
67857 #include "internal.h"
67858
67859-int compat_log = 1;
67860+int compat_log = 0;
67861
67862 int compat_printk(const char *fmt, ...)
67863 {
67864@@ -512,7 +512,7 @@ COMPAT_SYSCALL_DEFINE2(io_setup, unsigned, nr_reqs, u32 __user *, ctx32p)
67865
67866 set_fs(KERNEL_DS);
67867 /* The __user pointer cast is valid because of the set_fs() */
67868- ret = sys_io_setup(nr_reqs, (aio_context_t __user *) &ctx64);
67869+ ret = sys_io_setup(nr_reqs, (aio_context_t __force_user *) &ctx64);
67870 set_fs(oldfs);
67871 /* truncating is ok because it's a user address */
67872 if (!ret)
67873@@ -562,7 +562,7 @@ ssize_t compat_rw_copy_check_uvector(int type,
67874 goto out;
67875
67876 ret = -EINVAL;
67877- if (nr_segs > UIO_MAXIOV || nr_segs < 0)
67878+ if (nr_segs > UIO_MAXIOV)
67879 goto out;
67880 if (nr_segs > fast_segs) {
67881 ret = -ENOMEM;
67882@@ -844,6 +844,7 @@ struct compat_old_linux_dirent {
67883 struct compat_readdir_callback {
67884 struct dir_context ctx;
67885 struct compat_old_linux_dirent __user *dirent;
67886+ struct file * file;
67887 int result;
67888 };
67889
67890@@ -863,6 +864,10 @@ static int compat_fillonedir(struct dir_context *ctx, const char *name,
67891 buf->result = -EOVERFLOW;
67892 return -EOVERFLOW;
67893 }
67894+
67895+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
67896+ return 0;
67897+
67898 buf->result++;
67899 dirent = buf->dirent;
67900 if (!access_ok(VERIFY_WRITE, dirent,
67901@@ -894,6 +899,7 @@ COMPAT_SYSCALL_DEFINE3(old_readdir, unsigned int, fd,
67902 if (!f.file)
67903 return -EBADF;
67904
67905+ buf.file = f.file;
67906 error = iterate_dir(f.file, &buf.ctx);
67907 if (buf.result)
67908 error = buf.result;
67909@@ -913,6 +919,7 @@ struct compat_getdents_callback {
67910 struct dir_context ctx;
67911 struct compat_linux_dirent __user *current_dir;
67912 struct compat_linux_dirent __user *previous;
67913+ struct file * file;
67914 int count;
67915 int error;
67916 };
67917@@ -935,6 +942,10 @@ static int compat_filldir(struct dir_context *ctx, const char *name, int namlen,
67918 buf->error = -EOVERFLOW;
67919 return -EOVERFLOW;
67920 }
67921+
67922+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
67923+ return 0;
67924+
67925 dirent = buf->previous;
67926 if (dirent) {
67927 if (__put_user(offset, &dirent->d_off))
67928@@ -980,6 +991,7 @@ COMPAT_SYSCALL_DEFINE3(getdents, unsigned int, fd,
67929 if (!f.file)
67930 return -EBADF;
67931
67932+ buf.file = f.file;
67933 error = iterate_dir(f.file, &buf.ctx);
67934 if (error >= 0)
67935 error = buf.error;
67936@@ -1000,6 +1012,7 @@ struct compat_getdents_callback64 {
67937 struct dir_context ctx;
67938 struct linux_dirent64 __user *current_dir;
67939 struct linux_dirent64 __user *previous;
67940+ struct file * file;
67941 int count;
67942 int error;
67943 };
67944@@ -1018,6 +1031,10 @@ static int compat_filldir64(struct dir_context *ctx, const char *name,
67945 buf->error = -EINVAL; /* only used if we fail.. */
67946 if (reclen > buf->count)
67947 return -EINVAL;
67948+
67949+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
67950+ return 0;
67951+
67952 dirent = buf->previous;
67953
67954 if (dirent) {
67955@@ -1067,6 +1084,7 @@ COMPAT_SYSCALL_DEFINE3(getdents64, unsigned int, fd,
67956 if (!f.file)
67957 return -EBADF;
67958
67959+ buf.file = f.file;
67960 error = iterate_dir(f.file, &buf.ctx);
67961 if (error >= 0)
67962 error = buf.error;
67963diff --git a/fs/compat_binfmt_elf.c b/fs/compat_binfmt_elf.c
67964index 4d24d17..4f8c09e 100644
67965--- a/fs/compat_binfmt_elf.c
67966+++ b/fs/compat_binfmt_elf.c
67967@@ -30,11 +30,13 @@
67968 #undef elf_phdr
67969 #undef elf_shdr
67970 #undef elf_note
67971+#undef elf_dyn
67972 #undef elf_addr_t
67973 #define elfhdr elf32_hdr
67974 #define elf_phdr elf32_phdr
67975 #define elf_shdr elf32_shdr
67976 #define elf_note elf32_note
67977+#define elf_dyn Elf32_Dyn
67978 #define elf_addr_t Elf32_Addr
67979
67980 /*
67981diff --git a/fs/compat_ioctl.c b/fs/compat_ioctl.c
67982index afec645..9c65620 100644
67983--- a/fs/compat_ioctl.c
67984+++ b/fs/compat_ioctl.c
67985@@ -621,7 +621,7 @@ static int serial_struct_ioctl(unsigned fd, unsigned cmd,
67986 return -EFAULT;
67987 if (__get_user(udata, &ss32->iomem_base))
67988 return -EFAULT;
67989- ss.iomem_base = compat_ptr(udata);
67990+ ss.iomem_base = (unsigned char __force_kernel *)compat_ptr(udata);
67991 if (__get_user(ss.iomem_reg_shift, &ss32->iomem_reg_shift) ||
67992 __get_user(ss.port_high, &ss32->port_high))
67993 return -EFAULT;
67994@@ -703,8 +703,8 @@ static int do_i2c_rdwr_ioctl(unsigned int fd, unsigned int cmd,
67995 for (i = 0; i < nmsgs; i++) {
67996 if (copy_in_user(&tmsgs[i].addr, &umsgs[i].addr, 3*sizeof(u16)))
67997 return -EFAULT;
67998- if (get_user(datap, &umsgs[i].buf) ||
67999- put_user(compat_ptr(datap), &tmsgs[i].buf))
68000+ if (get_user(datap, (compat_caddr_t __user *)&umsgs[i].buf) ||
68001+ put_user(compat_ptr(datap), (u8 __user * __user *)&tmsgs[i].buf))
68002 return -EFAULT;
68003 }
68004 return sys_ioctl(fd, cmd, (unsigned long)tdata);
68005@@ -797,7 +797,7 @@ static int compat_ioctl_preallocate(struct file *file,
68006 copy_in_user(&p->l_len, &p32->l_len, sizeof(s64)) ||
68007 copy_in_user(&p->l_sysid, &p32->l_sysid, sizeof(s32)) ||
68008 copy_in_user(&p->l_pid, &p32->l_pid, sizeof(u32)) ||
68009- copy_in_user(&p->l_pad, &p32->l_pad, 4*sizeof(u32)))
68010+ copy_in_user(p->l_pad, p32->l_pad, 4*sizeof(u32)))
68011 return -EFAULT;
68012
68013 return ioctl_preallocate(file, p);
68014@@ -1618,8 +1618,8 @@ COMPAT_SYSCALL_DEFINE3(ioctl, unsigned int, fd, unsigned int, cmd,
68015 static int __init init_sys32_ioctl_cmp(const void *p, const void *q)
68016 {
68017 unsigned int a, b;
68018- a = *(unsigned int *)p;
68019- b = *(unsigned int *)q;
68020+ a = *(const unsigned int *)p;
68021+ b = *(const unsigned int *)q;
68022 if (a > b)
68023 return 1;
68024 if (a < b)
68025diff --git a/fs/configfs/dir.c b/fs/configfs/dir.c
68026index cf0db00..c7f70e8 100644
68027--- a/fs/configfs/dir.c
68028+++ b/fs/configfs/dir.c
68029@@ -1540,7 +1540,8 @@ static int configfs_readdir(struct file *file, struct dir_context *ctx)
68030 }
68031 for (p = q->next; p != &parent_sd->s_children; p = p->next) {
68032 struct configfs_dirent *next;
68033- const char *name;
68034+ const unsigned char * name;
68035+ char d_name[sizeof(next->s_dentry->d_iname)];
68036 int len;
68037 struct inode *inode = NULL;
68038
68039@@ -1549,7 +1550,12 @@ static int configfs_readdir(struct file *file, struct dir_context *ctx)
68040 continue;
68041
68042 name = configfs_get_name(next);
68043- len = strlen(name);
68044+ if (next->s_dentry && name == next->s_dentry->d_iname) {
68045+ len = next->s_dentry->d_name.len;
68046+ memcpy(d_name, name, len);
68047+ name = d_name;
68048+ } else
68049+ len = strlen(name);
68050
68051 /*
68052 * We'll have a dentry and an inode for
68053diff --git a/fs/coredump.c b/fs/coredump.c
68054index bbbe139..b76fae5 100644
68055--- a/fs/coredump.c
68056+++ b/fs/coredump.c
68057@@ -450,8 +450,8 @@ static void wait_for_dump_helpers(struct file *file)
68058 struct pipe_inode_info *pipe = file->private_data;
68059
68060 pipe_lock(pipe);
68061- pipe->readers++;
68062- pipe->writers--;
68063+ atomic_inc(&pipe->readers);
68064+ atomic_dec(&pipe->writers);
68065 wake_up_interruptible_sync(&pipe->wait);
68066 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
68067 pipe_unlock(pipe);
68068@@ -460,11 +460,11 @@ static void wait_for_dump_helpers(struct file *file)
68069 * We actually want wait_event_freezable() but then we need
68070 * to clear TIF_SIGPENDING and improve dump_interrupted().
68071 */
68072- wait_event_interruptible(pipe->wait, pipe->readers == 1);
68073+ wait_event_interruptible(pipe->wait, atomic_read(&pipe->readers) == 1);
68074
68075 pipe_lock(pipe);
68076- pipe->readers--;
68077- pipe->writers++;
68078+ atomic_dec(&pipe->readers);
68079+ atomic_inc(&pipe->writers);
68080 pipe_unlock(pipe);
68081 }
68082
68083@@ -511,7 +511,9 @@ void do_coredump(const siginfo_t *siginfo)
68084 struct files_struct *displaced;
68085 bool need_nonrelative = false;
68086 bool core_dumped = false;
68087- static atomic_t core_dump_count = ATOMIC_INIT(0);
68088+ static atomic_unchecked_t core_dump_count = ATOMIC_INIT(0);
68089+ long signr = siginfo->si_signo;
68090+ int dumpable;
68091 struct coredump_params cprm = {
68092 .siginfo = siginfo,
68093 .regs = signal_pt_regs(),
68094@@ -524,12 +526,17 @@ void do_coredump(const siginfo_t *siginfo)
68095 .mm_flags = mm->flags,
68096 };
68097
68098- audit_core_dumps(siginfo->si_signo);
68099+ audit_core_dumps(signr);
68100+
68101+ dumpable = __get_dumpable(cprm.mm_flags);
68102+
68103+ if (signr == SIGSEGV || signr == SIGBUS || signr == SIGKILL || signr == SIGILL)
68104+ gr_handle_brute_attach(dumpable);
68105
68106 binfmt = mm->binfmt;
68107 if (!binfmt || !binfmt->core_dump)
68108 goto fail;
68109- if (!__get_dumpable(cprm.mm_flags))
68110+ if (!dumpable)
68111 goto fail;
68112
68113 cred = prepare_creds();
68114@@ -548,7 +555,7 @@ void do_coredump(const siginfo_t *siginfo)
68115 need_nonrelative = true;
68116 }
68117
68118- retval = coredump_wait(siginfo->si_signo, &core_state);
68119+ retval = coredump_wait(signr, &core_state);
68120 if (retval < 0)
68121 goto fail_creds;
68122
68123@@ -591,7 +598,7 @@ void do_coredump(const siginfo_t *siginfo)
68124 }
68125 cprm.limit = RLIM_INFINITY;
68126
68127- dump_count = atomic_inc_return(&core_dump_count);
68128+ dump_count = atomic_inc_return_unchecked(&core_dump_count);
68129 if (core_pipe_limit && (core_pipe_limit < dump_count)) {
68130 printk(KERN_WARNING "Pid %d(%s) over core_pipe_limit\n",
68131 task_tgid_vnr(current), current->comm);
68132@@ -623,6 +630,8 @@ void do_coredump(const siginfo_t *siginfo)
68133 } else {
68134 struct inode *inode;
68135
68136+ gr_learn_resource(current, RLIMIT_CORE, binfmt->min_coredump, 1);
68137+
68138 if (cprm.limit < binfmt->min_coredump)
68139 goto fail_unlock;
68140
68141@@ -681,7 +690,7 @@ close_fail:
68142 filp_close(cprm.file, NULL);
68143 fail_dropcount:
68144 if (ispipe)
68145- atomic_dec(&core_dump_count);
68146+ atomic_dec_unchecked(&core_dump_count);
68147 fail_unlock:
68148 kfree(cn.corename);
68149 coredump_finish(mm, core_dumped);
68150@@ -702,6 +711,8 @@ int dump_emit(struct coredump_params *cprm, const void *addr, int nr)
68151 struct file *file = cprm->file;
68152 loff_t pos = file->f_pos;
68153 ssize_t n;
68154+
68155+ gr_learn_resource(current, RLIMIT_CORE, cprm->written + nr, 1);
68156 if (cprm->written + nr > cprm->limit)
68157 return 0;
68158 while (nr) {
68159diff --git a/fs/dcache.c b/fs/dcache.c
68160index c71e373..5c1f656 100644
68161--- a/fs/dcache.c
68162+++ b/fs/dcache.c
68163@@ -511,7 +511,7 @@ static void __dentry_kill(struct dentry *dentry)
68164 * dentry_iput drops the locks, at which point nobody (except
68165 * transient RCU lookups) can reach this dentry.
68166 */
68167- BUG_ON(dentry->d_lockref.count > 0);
68168+ BUG_ON(__lockref_read(&dentry->d_lockref) > 0);
68169 this_cpu_dec(nr_dentry);
68170 if (dentry->d_op && dentry->d_op->d_release)
68171 dentry->d_op->d_release(dentry);
68172@@ -564,7 +564,7 @@ static inline struct dentry *lock_parent(struct dentry *dentry)
68173 struct dentry *parent = dentry->d_parent;
68174 if (IS_ROOT(dentry))
68175 return NULL;
68176- if (unlikely(dentry->d_lockref.count < 0))
68177+ if (unlikely(__lockref_read(&dentry->d_lockref) < 0))
68178 return NULL;
68179 if (likely(spin_trylock(&parent->d_lock)))
68180 return parent;
68181@@ -626,8 +626,8 @@ static inline bool fast_dput(struct dentry *dentry)
68182 */
68183 if (unlikely(ret < 0)) {
68184 spin_lock(&dentry->d_lock);
68185- if (dentry->d_lockref.count > 1) {
68186- dentry->d_lockref.count--;
68187+ if (__lockref_read(&dentry->d_lockref) > 1) {
68188+ __lockref_dec(&dentry->d_lockref);
68189 spin_unlock(&dentry->d_lock);
68190 return 1;
68191 }
68192@@ -682,7 +682,7 @@ static inline bool fast_dput(struct dentry *dentry)
68193 * else could have killed it and marked it dead. Either way, we
68194 * don't need to do anything else.
68195 */
68196- if (dentry->d_lockref.count) {
68197+ if (__lockref_read(&dentry->d_lockref)) {
68198 spin_unlock(&dentry->d_lock);
68199 return 1;
68200 }
68201@@ -692,7 +692,7 @@ static inline bool fast_dput(struct dentry *dentry)
68202 * lock, and we just tested that it was zero, so we can just
68203 * set it to 1.
68204 */
68205- dentry->d_lockref.count = 1;
68206+ __lockref_set(&dentry->d_lockref, 1);
68207 return 0;
68208 }
68209
68210@@ -751,7 +751,7 @@ repeat:
68211 dentry->d_flags |= DCACHE_REFERENCED;
68212 dentry_lru_add(dentry);
68213
68214- dentry->d_lockref.count--;
68215+ __lockref_dec(&dentry->d_lockref);
68216 spin_unlock(&dentry->d_lock);
68217 return;
68218
68219@@ -766,7 +766,7 @@ EXPORT_SYMBOL(dput);
68220 /* This must be called with d_lock held */
68221 static inline void __dget_dlock(struct dentry *dentry)
68222 {
68223- dentry->d_lockref.count++;
68224+ __lockref_inc(&dentry->d_lockref);
68225 }
68226
68227 static inline void __dget(struct dentry *dentry)
68228@@ -807,8 +807,8 @@ repeat:
68229 goto repeat;
68230 }
68231 rcu_read_unlock();
68232- BUG_ON(!ret->d_lockref.count);
68233- ret->d_lockref.count++;
68234+ BUG_ON(!__lockref_read(&ret->d_lockref));
68235+ __lockref_inc(&ret->d_lockref);
68236 spin_unlock(&ret->d_lock);
68237 return ret;
68238 }
68239@@ -886,9 +886,9 @@ restart:
68240 spin_lock(&inode->i_lock);
68241 hlist_for_each_entry(dentry, &inode->i_dentry, d_u.d_alias) {
68242 spin_lock(&dentry->d_lock);
68243- if (!dentry->d_lockref.count) {
68244+ if (!__lockref_read(&dentry->d_lockref)) {
68245 struct dentry *parent = lock_parent(dentry);
68246- if (likely(!dentry->d_lockref.count)) {
68247+ if (likely(!__lockref_read(&dentry->d_lockref))) {
68248 __dentry_kill(dentry);
68249 dput(parent);
68250 goto restart;
68251@@ -923,7 +923,7 @@ static void shrink_dentry_list(struct list_head *list)
68252 * We found an inuse dentry which was not removed from
68253 * the LRU because of laziness during lookup. Do not free it.
68254 */
68255- if (dentry->d_lockref.count > 0) {
68256+ if (__lockref_read(&dentry->d_lockref) > 0) {
68257 spin_unlock(&dentry->d_lock);
68258 if (parent)
68259 spin_unlock(&parent->d_lock);
68260@@ -961,8 +961,8 @@ static void shrink_dentry_list(struct list_head *list)
68261 dentry = parent;
68262 while (dentry && !lockref_put_or_lock(&dentry->d_lockref)) {
68263 parent = lock_parent(dentry);
68264- if (dentry->d_lockref.count != 1) {
68265- dentry->d_lockref.count--;
68266+ if (__lockref_read(&dentry->d_lockref) != 1) {
68267+ __lockref_inc(&dentry->d_lockref);
68268 spin_unlock(&dentry->d_lock);
68269 if (parent)
68270 spin_unlock(&parent->d_lock);
68271@@ -1002,7 +1002,7 @@ static enum lru_status dentry_lru_isolate(struct list_head *item,
68272 * counts, just remove them from the LRU. Otherwise give them
68273 * another pass through the LRU.
68274 */
68275- if (dentry->d_lockref.count) {
68276+ if (__lockref_read(&dentry->d_lockref)) {
68277 d_lru_isolate(lru, dentry);
68278 spin_unlock(&dentry->d_lock);
68279 return LRU_REMOVED;
68280@@ -1336,7 +1336,7 @@ static enum d_walk_ret select_collect(void *_data, struct dentry *dentry)
68281 } else {
68282 if (dentry->d_flags & DCACHE_LRU_LIST)
68283 d_lru_del(dentry);
68284- if (!dentry->d_lockref.count) {
68285+ if (!__lockref_read(&dentry->d_lockref)) {
68286 d_shrink_add(dentry, &data->dispose);
68287 data->found++;
68288 }
68289@@ -1384,7 +1384,7 @@ static enum d_walk_ret umount_check(void *_data, struct dentry *dentry)
68290 return D_WALK_CONTINUE;
68291
68292 /* root with refcount 1 is fine */
68293- if (dentry == _data && dentry->d_lockref.count == 1)
68294+ if (dentry == _data && __lockref_read(&dentry->d_lockref) == 1)
68295 return D_WALK_CONTINUE;
68296
68297 printk(KERN_ERR "BUG: Dentry %p{i=%lx,n=%pd} "
68298@@ -1393,7 +1393,7 @@ static enum d_walk_ret umount_check(void *_data, struct dentry *dentry)
68299 dentry->d_inode ?
68300 dentry->d_inode->i_ino : 0UL,
68301 dentry,
68302- dentry->d_lockref.count,
68303+ __lockref_read(&dentry->d_lockref),
68304 dentry->d_sb->s_type->name,
68305 dentry->d_sb->s_id);
68306 WARN_ON(1);
68307@@ -1534,7 +1534,7 @@ struct dentry *__d_alloc(struct super_block *sb, const struct qstr *name)
68308 dentry->d_iname[DNAME_INLINE_LEN-1] = 0;
68309 if (name->len > DNAME_INLINE_LEN-1) {
68310 size_t size = offsetof(struct external_name, name[1]);
68311- struct external_name *p = kmalloc(size + name->len, GFP_KERNEL);
68312+ struct external_name *p = kmalloc(round_up(size + name->len, sizeof(unsigned long)), GFP_KERNEL);
68313 if (!p) {
68314 kmem_cache_free(dentry_cache, dentry);
68315 return NULL;
68316@@ -1557,7 +1557,7 @@ struct dentry *__d_alloc(struct super_block *sb, const struct qstr *name)
68317 smp_wmb();
68318 dentry->d_name.name = dname;
68319
68320- dentry->d_lockref.count = 1;
68321+ __lockref_set(&dentry->d_lockref, 1);
68322 dentry->d_flags = 0;
68323 spin_lock_init(&dentry->d_lock);
68324 seqcount_init(&dentry->d_seq);
68325@@ -1566,6 +1566,9 @@ struct dentry *__d_alloc(struct super_block *sb, const struct qstr *name)
68326 dentry->d_sb = sb;
68327 dentry->d_op = NULL;
68328 dentry->d_fsdata = NULL;
68329+#ifdef CONFIG_GRKERNSEC_CHROOT_RENAME
68330+ atomic_set(&dentry->chroot_refcnt, 0);
68331+#endif
68332 INIT_HLIST_BL_NODE(&dentry->d_hash);
68333 INIT_LIST_HEAD(&dentry->d_lru);
68334 INIT_LIST_HEAD(&dentry->d_subdirs);
68335@@ -2290,7 +2293,7 @@ struct dentry *__d_lookup(const struct dentry *parent, const struct qstr *name)
68336 goto next;
68337 }
68338
68339- dentry->d_lockref.count++;
68340+ __lockref_inc(&dentry->d_lockref);
68341 found = dentry;
68342 spin_unlock(&dentry->d_lock);
68343 break;
68344@@ -2358,7 +2361,7 @@ again:
68345 spin_lock(&dentry->d_lock);
68346 inode = dentry->d_inode;
68347 isdir = S_ISDIR(inode->i_mode);
68348- if (dentry->d_lockref.count == 1) {
68349+ if (__lockref_read(&dentry->d_lockref) == 1) {
68350 if (!spin_trylock(&inode->i_lock)) {
68351 spin_unlock(&dentry->d_lock);
68352 cpu_relax();
68353@@ -3311,7 +3314,7 @@ static enum d_walk_ret d_genocide_kill(void *data, struct dentry *dentry)
68354
68355 if (!(dentry->d_flags & DCACHE_GENOCIDE)) {
68356 dentry->d_flags |= DCACHE_GENOCIDE;
68357- dentry->d_lockref.count--;
68358+ __lockref_dec(&dentry->d_lockref);
68359 }
68360 }
68361 return D_WALK_CONTINUE;
68362@@ -3427,7 +3430,8 @@ void __init vfs_caches_init(unsigned long mempages)
68363 mempages -= reserve;
68364
68365 names_cachep = kmem_cache_create("names_cache", PATH_MAX, 0,
68366- SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
68367+ SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_USERCOPY|
68368+ SLAB_NO_SANITIZE, NULL);
68369
68370 dcache_init();
68371 inode_init();
68372diff --git a/fs/debugfs/inode.c b/fs/debugfs/inode.c
68373index 96400ab..906103d 100644
68374--- a/fs/debugfs/inode.c
68375+++ b/fs/debugfs/inode.c
68376@@ -386,6 +386,10 @@ struct dentry *debugfs_create_file_size(const char *name, umode_t mode,
68377 }
68378 EXPORT_SYMBOL_GPL(debugfs_create_file_size);
68379
68380+#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
68381+extern int grsec_enable_sysfs_restrict;
68382+#endif
68383+
68384 /**
68385 * debugfs_create_dir - create a directory in the debugfs filesystem
68386 * @name: a pointer to a string containing the name of the directory to
68387@@ -404,6 +408,10 @@ EXPORT_SYMBOL_GPL(debugfs_create_file_size);
68388 * If debugfs is not enabled in the kernel, the value -%ENODEV will be
68389 * returned.
68390 */
68391+#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
68392+extern int grsec_enable_sysfs_restrict;
68393+#endif
68394+
68395 struct dentry *debugfs_create_dir(const char *name, struct dentry *parent)
68396 {
68397 struct dentry *dentry = start_creating(name, parent);
68398@@ -416,7 +424,12 @@ struct dentry *debugfs_create_dir(const char *name, struct dentry *parent)
68399 if (unlikely(!inode))
68400 return failed_creating(dentry);
68401
68402- inode->i_mode = S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO;
68403+#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
68404+ if (grsec_enable_sysfs_restrict)
68405+ inode->i_mode = S_IFDIR | S_IRWXU;
68406+ else
68407+#endif
68408+ inode->i_mode = S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO;
68409 inode->i_op = &simple_dir_inode_operations;
68410 inode->i_fop = &simple_dir_operations;
68411
68412diff --git a/fs/ecryptfs/inode.c b/fs/ecryptfs/inode.c
68413index b08b518..d6acffa 100644
68414--- a/fs/ecryptfs/inode.c
68415+++ b/fs/ecryptfs/inode.c
68416@@ -663,7 +663,7 @@ static char *ecryptfs_readlink_lower(struct dentry *dentry, size_t *bufsiz)
68417 old_fs = get_fs();
68418 set_fs(get_ds());
68419 rc = lower_dentry->d_inode->i_op->readlink(lower_dentry,
68420- (char __user *)lower_buf,
68421+ (char __force_user *)lower_buf,
68422 PATH_MAX);
68423 set_fs(old_fs);
68424 if (rc < 0)
68425diff --git a/fs/ecryptfs/miscdev.c b/fs/ecryptfs/miscdev.c
68426index e4141f2..d8263e8 100644
68427--- a/fs/ecryptfs/miscdev.c
68428+++ b/fs/ecryptfs/miscdev.c
68429@@ -304,7 +304,7 @@ check_list:
68430 goto out_unlock_msg_ctx;
68431 i = PKT_TYPE_SIZE + PKT_CTR_SIZE;
68432 if (msg_ctx->msg) {
68433- if (copy_to_user(&buf[i], packet_length, packet_length_size))
68434+ if (packet_length_size > sizeof(packet_length) || copy_to_user(&buf[i], packet_length, packet_length_size))
68435 goto out_unlock_msg_ctx;
68436 i += packet_length_size;
68437 if (copy_to_user(&buf[i], msg_ctx->msg, msg_ctx->msg_size))
68438diff --git a/fs/exec.c b/fs/exec.c
68439index 00400cf..b9dca28 100644
68440--- a/fs/exec.c
68441+++ b/fs/exec.c
68442@@ -56,8 +56,20 @@
68443 #include <linux/pipe_fs_i.h>
68444 #include <linux/oom.h>
68445 #include <linux/compat.h>
68446+#include <linux/random.h>
68447+#include <linux/seq_file.h>
68448+#include <linux/coredump.h>
68449+#include <linux/mman.h>
68450+
68451+#ifdef CONFIG_PAX_REFCOUNT
68452+#include <linux/kallsyms.h>
68453+#include <linux/kdebug.h>
68454+#endif
68455+
68456+#include <trace/events/fs.h>
68457
68458 #include <asm/uaccess.h>
68459+#include <asm/sections.h>
68460 #include <asm/mmu_context.h>
68461 #include <asm/tlb.h>
68462
68463@@ -66,19 +78,34 @@
68464
68465 #include <trace/events/sched.h>
68466
68467+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
68468+void __weak pax_set_initial_flags(struct linux_binprm *bprm)
68469+{
68470+ pr_warn_once("PAX: PAX_HAVE_ACL_FLAGS was enabled without providing the pax_set_initial_flags callback, this is probably not what you wanted.\n");
68471+}
68472+#endif
68473+
68474+#ifdef CONFIG_PAX_HOOK_ACL_FLAGS
68475+void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
68476+EXPORT_SYMBOL(pax_set_initial_flags_func);
68477+#endif
68478+
68479 int suid_dumpable = 0;
68480
68481 static LIST_HEAD(formats);
68482 static DEFINE_RWLOCK(binfmt_lock);
68483
68484+extern int gr_process_kernel_exec_ban(void);
68485+extern int gr_process_suid_exec_ban(const struct linux_binprm *bprm);
68486+
68487 void __register_binfmt(struct linux_binfmt * fmt, int insert)
68488 {
68489 BUG_ON(!fmt);
68490 if (WARN_ON(!fmt->load_binary))
68491 return;
68492 write_lock(&binfmt_lock);
68493- insert ? list_add(&fmt->lh, &formats) :
68494- list_add_tail(&fmt->lh, &formats);
68495+ insert ? pax_list_add((struct list_head *)&fmt->lh, &formats) :
68496+ pax_list_add_tail((struct list_head *)&fmt->lh, &formats);
68497 write_unlock(&binfmt_lock);
68498 }
68499
68500@@ -87,7 +114,7 @@ EXPORT_SYMBOL(__register_binfmt);
68501 void unregister_binfmt(struct linux_binfmt * fmt)
68502 {
68503 write_lock(&binfmt_lock);
68504- list_del(&fmt->lh);
68505+ pax_list_del((struct list_head *)&fmt->lh);
68506 write_unlock(&binfmt_lock);
68507 }
68508
68509@@ -183,18 +210,10 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
68510 int write)
68511 {
68512 struct page *page;
68513- int ret;
68514
68515-#ifdef CONFIG_STACK_GROWSUP
68516- if (write) {
68517- ret = expand_downwards(bprm->vma, pos);
68518- if (ret < 0)
68519- return NULL;
68520- }
68521-#endif
68522- ret = get_user_pages(current, bprm->mm, pos,
68523- 1, write, 1, &page, NULL);
68524- if (ret <= 0)
68525+ if (0 > expand_downwards(bprm->vma, pos))
68526+ return NULL;
68527+ if (0 >= get_user_pages(current, bprm->mm, pos, 1, write, 1, &page, NULL))
68528 return NULL;
68529
68530 if (write) {
68531@@ -210,6 +229,17 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
68532 if (size <= ARG_MAX)
68533 return page;
68534
68535+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
68536+ // only allow 512KB for argv+env on suid/sgid binaries
68537+ // to prevent easy ASLR exhaustion
68538+ if (((!uid_eq(bprm->cred->euid, current_euid())) ||
68539+ (!gid_eq(bprm->cred->egid, current_egid()))) &&
68540+ (size > (512 * 1024))) {
68541+ put_page(page);
68542+ return NULL;
68543+ }
68544+#endif
68545+
68546 /*
68547 * Limit to 1/4-th the stack size for the argv+env strings.
68548 * This ensures that:
68549@@ -269,6 +299,11 @@ static int __bprm_mm_init(struct linux_binprm *bprm)
68550 vma->vm_end = STACK_TOP_MAX;
68551 vma->vm_start = vma->vm_end - PAGE_SIZE;
68552 vma->vm_flags = VM_SOFTDIRTY | VM_STACK_FLAGS | VM_STACK_INCOMPLETE_SETUP;
68553+
68554+#ifdef CONFIG_PAX_SEGMEXEC
68555+ vma->vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
68556+#endif
68557+
68558 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
68559 INIT_LIST_HEAD(&vma->anon_vma_chain);
68560
68561@@ -280,6 +315,12 @@ static int __bprm_mm_init(struct linux_binprm *bprm)
68562 arch_bprm_mm_init(mm, vma);
68563 up_write(&mm->mmap_sem);
68564 bprm->p = vma->vm_end - sizeof(void *);
68565+
68566+#ifdef CONFIG_PAX_RANDUSTACK
68567+ if (randomize_va_space)
68568+ bprm->p ^= prandom_u32() & ~PAGE_MASK;
68569+#endif
68570+
68571 return 0;
68572 err:
68573 up_write(&mm->mmap_sem);
68574@@ -396,7 +437,7 @@ struct user_arg_ptr {
68575 } ptr;
68576 };
68577
68578-static const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
68579+const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
68580 {
68581 const char __user *native;
68582
68583@@ -405,14 +446,14 @@ static const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
68584 compat_uptr_t compat;
68585
68586 if (get_user(compat, argv.ptr.compat + nr))
68587- return ERR_PTR(-EFAULT);
68588+ return (const char __force_user *)ERR_PTR(-EFAULT);
68589
68590 return compat_ptr(compat);
68591 }
68592 #endif
68593
68594 if (get_user(native, argv.ptr.native + nr))
68595- return ERR_PTR(-EFAULT);
68596+ return (const char __force_user *)ERR_PTR(-EFAULT);
68597
68598 return native;
68599 }
68600@@ -431,7 +472,7 @@ static int count(struct user_arg_ptr argv, int max)
68601 if (!p)
68602 break;
68603
68604- if (IS_ERR(p))
68605+ if (IS_ERR((const char __force_kernel *)p))
68606 return -EFAULT;
68607
68608 if (i >= max)
68609@@ -466,7 +507,7 @@ static int copy_strings(int argc, struct user_arg_ptr argv,
68610
68611 ret = -EFAULT;
68612 str = get_user_arg_ptr(argv, argc);
68613- if (IS_ERR(str))
68614+ if (IS_ERR((const char __force_kernel *)str))
68615 goto out;
68616
68617 len = strnlen_user(str, MAX_ARG_STRLEN);
68618@@ -548,7 +589,7 @@ int copy_strings_kernel(int argc, const char *const *__argv,
68619 int r;
68620 mm_segment_t oldfs = get_fs();
68621 struct user_arg_ptr argv = {
68622- .ptr.native = (const char __user *const __user *)__argv,
68623+ .ptr.native = (const char __user * const __force_user *)__argv,
68624 };
68625
68626 set_fs(KERNEL_DS);
68627@@ -583,7 +624,8 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
68628 unsigned long new_end = old_end - shift;
68629 struct mmu_gather tlb;
68630
68631- BUG_ON(new_start > new_end);
68632+ if (new_start >= new_end || new_start < mmap_min_addr)
68633+ return -ENOMEM;
68634
68635 /*
68636 * ensure there are no vmas between where we want to go
68637@@ -592,6 +634,10 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
68638 if (vma != find_vma(mm, new_start))
68639 return -EFAULT;
68640
68641+#ifdef CONFIG_PAX_SEGMEXEC
68642+ BUG_ON(pax_find_mirror_vma(vma));
68643+#endif
68644+
68645 /*
68646 * cover the whole range: [new_start, old_end)
68647 */
68648@@ -672,10 +718,6 @@ int setup_arg_pages(struct linux_binprm *bprm,
68649 stack_top = arch_align_stack(stack_top);
68650 stack_top = PAGE_ALIGN(stack_top);
68651
68652- if (unlikely(stack_top < mmap_min_addr) ||
68653- unlikely(vma->vm_end - vma->vm_start >= stack_top - mmap_min_addr))
68654- return -ENOMEM;
68655-
68656 stack_shift = vma->vm_end - stack_top;
68657
68658 bprm->p -= stack_shift;
68659@@ -687,8 +729,28 @@ int setup_arg_pages(struct linux_binprm *bprm,
68660 bprm->exec -= stack_shift;
68661
68662 down_write(&mm->mmap_sem);
68663+
68664+ /* Move stack pages down in memory. */
68665+ if (stack_shift) {
68666+ ret = shift_arg_pages(vma, stack_shift);
68667+ if (ret)
68668+ goto out_unlock;
68669+ }
68670+
68671 vm_flags = VM_STACK_FLAGS;
68672
68673+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
68674+ if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
68675+ vm_flags &= ~VM_EXEC;
68676+
68677+#ifdef CONFIG_PAX_MPROTECT
68678+ if (mm->pax_flags & MF_PAX_MPROTECT)
68679+ vm_flags &= ~VM_MAYEXEC;
68680+#endif
68681+
68682+ }
68683+#endif
68684+
68685 /*
68686 * Adjust stack execute permissions; explicitly enable for
68687 * EXSTACK_ENABLE_X, disable for EXSTACK_DISABLE_X and leave alone
68688@@ -707,13 +769,6 @@ int setup_arg_pages(struct linux_binprm *bprm,
68689 goto out_unlock;
68690 BUG_ON(prev != vma);
68691
68692- /* Move stack pages down in memory. */
68693- if (stack_shift) {
68694- ret = shift_arg_pages(vma, stack_shift);
68695- if (ret)
68696- goto out_unlock;
68697- }
68698-
68699 /* mprotect_fixup is overkill to remove the temporary stack flags */
68700 vma->vm_flags &= ~VM_STACK_INCOMPLETE_SETUP;
68701
68702@@ -737,6 +792,27 @@ int setup_arg_pages(struct linux_binprm *bprm,
68703 #endif
68704 current->mm->start_stack = bprm->p;
68705 ret = expand_stack(vma, stack_base);
68706+
68707+#if !defined(CONFIG_STACK_GROWSUP) && defined(CONFIG_PAX_RANDMMAP)
68708+ if (!ret && (mm->pax_flags & MF_PAX_RANDMMAP) && STACK_TOP <= 0xFFFFFFFFU && STACK_TOP > vma->vm_end) {
68709+ unsigned long size;
68710+ vm_flags_t vm_flags;
68711+
68712+ size = STACK_TOP - vma->vm_end;
68713+ vm_flags = VM_NONE | VM_DONTEXPAND | VM_DONTDUMP;
68714+
68715+ ret = vma->vm_end != mmap_region(NULL, vma->vm_end, size, vm_flags, 0);
68716+
68717+#ifdef CONFIG_X86
68718+ if (!ret) {
68719+ size = PAGE_SIZE + mmap_min_addr + ((mm->delta_mmap ^ mm->delta_stack) & (0xFFUL << PAGE_SHIFT));
68720+ ret = 0 != mmap_region(NULL, 0, PAGE_ALIGN(size), vm_flags, 0);
68721+ }
68722+#endif
68723+
68724+ }
68725+#endif
68726+
68727 if (ret)
68728 ret = -EFAULT;
68729
68730@@ -781,8 +857,10 @@ static struct file *do_open_execat(int fd, struct filename *name, int flags)
68731 if (err)
68732 goto exit;
68733
68734- if (name->name[0] != '\0')
68735+ if (name->name[0] != '\0') {
68736 fsnotify_open(file);
68737+ trace_open_exec(name->name);
68738+ }
68739
68740 out:
68741 return file;
68742@@ -815,7 +893,7 @@ int kernel_read(struct file *file, loff_t offset,
68743 old_fs = get_fs();
68744 set_fs(get_ds());
68745 /* The cast to a user pointer is valid due to the set_fs() */
68746- result = vfs_read(file, (void __user *)addr, count, &pos);
68747+ result = vfs_read(file, (void __force_user *)addr, count, &pos);
68748 set_fs(old_fs);
68749 return result;
68750 }
68751@@ -860,6 +938,7 @@ static int exec_mmap(struct mm_struct *mm)
68752 tsk->mm = mm;
68753 tsk->active_mm = mm;
68754 activate_mm(active_mm, mm);
68755+ populate_stack();
68756 tsk->mm->vmacache_seqnum = 0;
68757 vmacache_flush(tsk);
68758 task_unlock(tsk);
68759@@ -926,10 +1005,14 @@ static int de_thread(struct task_struct *tsk)
68760 if (!thread_group_leader(tsk)) {
68761 struct task_struct *leader = tsk->group_leader;
68762
68763- sig->notify_count = -1; /* for exit_notify() */
68764 for (;;) {
68765 threadgroup_change_begin(tsk);
68766 write_lock_irq(&tasklist_lock);
68767+ /*
68768+ * Do this under tasklist_lock to ensure that
68769+ * exit_notify() can't miss ->group_exit_task
68770+ */
68771+ sig->notify_count = -1;
68772 if (likely(leader->exit_state))
68773 break;
68774 __set_current_state(TASK_KILLABLE);
68775@@ -1258,7 +1341,7 @@ static void check_unsafe_exec(struct linux_binprm *bprm)
68776 }
68777 rcu_read_unlock();
68778
68779- if (p->fs->users > n_fs)
68780+ if (atomic_read(&p->fs->users) > n_fs)
68781 bprm->unsafe |= LSM_UNSAFE_SHARE;
68782 else
68783 p->fs->in_exec = 1;
68784@@ -1459,6 +1542,31 @@ static int exec_binprm(struct linux_binprm *bprm)
68785 return ret;
68786 }
68787
68788+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
68789+static DEFINE_PER_CPU(u64, exec_counter);
68790+static int __init init_exec_counters(void)
68791+{
68792+ unsigned int cpu;
68793+
68794+ for_each_possible_cpu(cpu) {
68795+ per_cpu(exec_counter, cpu) = (u64)cpu;
68796+ }
68797+
68798+ return 0;
68799+}
68800+early_initcall(init_exec_counters);
68801+static inline void increment_exec_counter(void)
68802+{
68803+ BUILD_BUG_ON(NR_CPUS > (1 << 16));
68804+ current->exec_id = this_cpu_add_return(exec_counter, 1 << 16);
68805+}
68806+#else
68807+static inline void increment_exec_counter(void) {}
68808+#endif
68809+
68810+extern void gr_handle_exec_args(struct linux_binprm *bprm,
68811+ struct user_arg_ptr argv);
68812+
68813 /*
68814 * sys_execve() executes a new program.
68815 */
68816@@ -1467,6 +1575,11 @@ static int do_execveat_common(int fd, struct filename *filename,
68817 struct user_arg_ptr envp,
68818 int flags)
68819 {
68820+#ifdef CONFIG_GRKERNSEC
68821+ struct file *old_exec_file;
68822+ struct acl_subject_label *old_acl;
68823+ struct rlimit old_rlim[RLIM_NLIMITS];
68824+#endif
68825 char *pathbuf = NULL;
68826 struct linux_binprm *bprm;
68827 struct file *file;
68828@@ -1476,6 +1589,8 @@ static int do_execveat_common(int fd, struct filename *filename,
68829 if (IS_ERR(filename))
68830 return PTR_ERR(filename);
68831
68832+ gr_learn_resource(current, RLIMIT_NPROC, atomic_read(&current_user()->processes), 1);
68833+
68834 /*
68835 * We move the actual failure in case of RLIMIT_NPROC excess from
68836 * set*uid() to execve() because too many poorly written programs
68837@@ -1513,6 +1628,11 @@ static int do_execveat_common(int fd, struct filename *filename,
68838 if (IS_ERR(file))
68839 goto out_unmark;
68840
68841+ if (gr_ptrace_readexec(file, bprm->unsafe)) {
68842+ retval = -EPERM;
68843+ goto out_unmark;
68844+ }
68845+
68846 sched_exec();
68847
68848 bprm->file = file;
68849@@ -1539,6 +1659,11 @@ static int do_execveat_common(int fd, struct filename *filename,
68850 }
68851 bprm->interp = bprm->filename;
68852
68853+ if (!gr_acl_handle_execve(file->f_path.dentry, file->f_path.mnt)) {
68854+ retval = -EACCES;
68855+ goto out_unmark;
68856+ }
68857+
68858 retval = bprm_mm_init(bprm);
68859 if (retval)
68860 goto out_unmark;
68861@@ -1555,24 +1680,70 @@ static int do_execveat_common(int fd, struct filename *filename,
68862 if (retval < 0)
68863 goto out;
68864
68865+#ifdef CONFIG_GRKERNSEC
68866+ old_acl = current->acl;
68867+ memcpy(old_rlim, current->signal->rlim, sizeof(old_rlim));
68868+ old_exec_file = current->exec_file;
68869+ get_file(file);
68870+ current->exec_file = file;
68871+#endif
68872+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
68873+ /* limit suid stack to 8MB
68874+ * we saved the old limits above and will restore them if this exec fails
68875+ */
68876+ if (((!uid_eq(bprm->cred->euid, current_euid())) || (!gid_eq(bprm->cred->egid, current_egid()))) &&
68877+ (old_rlim[RLIMIT_STACK].rlim_cur > (8 * 1024 * 1024)))
68878+ current->signal->rlim[RLIMIT_STACK].rlim_cur = 8 * 1024 * 1024;
68879+#endif
68880+
68881+ if (gr_process_kernel_exec_ban() || gr_process_suid_exec_ban(bprm)) {
68882+ retval = -EPERM;
68883+ goto out_fail;
68884+ }
68885+
68886+ if (!gr_tpe_allow(file)) {
68887+ retval = -EACCES;
68888+ goto out_fail;
68889+ }
68890+
68891+ if (gr_check_crash_exec(file)) {
68892+ retval = -EACCES;
68893+ goto out_fail;
68894+ }
68895+
68896+ retval = gr_set_proc_label(file->f_path.dentry, file->f_path.mnt,
68897+ bprm->unsafe);
68898+ if (retval < 0)
68899+ goto out_fail;
68900+
68901 retval = copy_strings_kernel(1, &bprm->filename, bprm);
68902 if (retval < 0)
68903- goto out;
68904+ goto out_fail;
68905
68906 bprm->exec = bprm->p;
68907 retval = copy_strings(bprm->envc, envp, bprm);
68908 if (retval < 0)
68909- goto out;
68910+ goto out_fail;
68911
68912 retval = copy_strings(bprm->argc, argv, bprm);
68913 if (retval < 0)
68914- goto out;
68915+ goto out_fail;
68916+
68917+ gr_log_chroot_exec(file->f_path.dentry, file->f_path.mnt);
68918+
68919+ gr_handle_exec_args(bprm, argv);
68920
68921 retval = exec_binprm(bprm);
68922 if (retval < 0)
68923- goto out;
68924+ goto out_fail;
68925+#ifdef CONFIG_GRKERNSEC
68926+ if (old_exec_file)
68927+ fput(old_exec_file);
68928+#endif
68929
68930 /* execve succeeded */
68931+
68932+ increment_exec_counter();
68933 current->fs->in_exec = 0;
68934 current->in_execve = 0;
68935 acct_update_integrals(current);
68936@@ -1584,6 +1755,14 @@ static int do_execveat_common(int fd, struct filename *filename,
68937 put_files_struct(displaced);
68938 return retval;
68939
68940+out_fail:
68941+#ifdef CONFIG_GRKERNSEC
68942+ current->acl = old_acl;
68943+ memcpy(current->signal->rlim, old_rlim, sizeof(old_rlim));
68944+ fput(current->exec_file);
68945+ current->exec_file = old_exec_file;
68946+#endif
68947+
68948 out:
68949 if (bprm->mm) {
68950 acct_arg_size(bprm, 0);
68951@@ -1730,3 +1909,312 @@ COMPAT_SYSCALL_DEFINE5(execveat, int, fd,
68952 argv, envp, flags);
68953 }
68954 #endif
68955+
68956+int pax_check_flags(unsigned long *flags)
68957+{
68958+ int retval = 0;
68959+
68960+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_SEGMEXEC)
68961+ if (*flags & MF_PAX_SEGMEXEC)
68962+ {
68963+ *flags &= ~MF_PAX_SEGMEXEC;
68964+ retval = -EINVAL;
68965+ }
68966+#endif
68967+
68968+ if ((*flags & MF_PAX_PAGEEXEC)
68969+
68970+#ifdef CONFIG_PAX_PAGEEXEC
68971+ && (*flags & MF_PAX_SEGMEXEC)
68972+#endif
68973+
68974+ )
68975+ {
68976+ *flags &= ~MF_PAX_PAGEEXEC;
68977+ retval = -EINVAL;
68978+ }
68979+
68980+ if ((*flags & MF_PAX_MPROTECT)
68981+
68982+#ifdef CONFIG_PAX_MPROTECT
68983+ && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
68984+#endif
68985+
68986+ )
68987+ {
68988+ *flags &= ~MF_PAX_MPROTECT;
68989+ retval = -EINVAL;
68990+ }
68991+
68992+ if ((*flags & MF_PAX_EMUTRAMP)
68993+
68994+#ifdef CONFIG_PAX_EMUTRAMP
68995+ && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
68996+#endif
68997+
68998+ )
68999+ {
69000+ *flags &= ~MF_PAX_EMUTRAMP;
69001+ retval = -EINVAL;
69002+ }
69003+
69004+ return retval;
69005+}
69006+
69007+EXPORT_SYMBOL(pax_check_flags);
69008+
69009+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
69010+char *pax_get_path(const struct path *path, char *buf, int buflen)
69011+{
69012+ char *pathname = d_path(path, buf, buflen);
69013+
69014+ if (IS_ERR(pathname))
69015+ goto toolong;
69016+
69017+ pathname = mangle_path(buf, pathname, "\t\n\\");
69018+ if (!pathname)
69019+ goto toolong;
69020+
69021+ *pathname = 0;
69022+ return buf;
69023+
69024+toolong:
69025+ return "<path too long>";
69026+}
69027+EXPORT_SYMBOL(pax_get_path);
69028+
69029+void pax_report_fault(struct pt_regs *regs, void *pc, void *sp)
69030+{
69031+ struct task_struct *tsk = current;
69032+ struct mm_struct *mm = current->mm;
69033+ char *buffer_exec = (char *)__get_free_page(GFP_KERNEL);
69034+ char *buffer_fault = (char *)__get_free_page(GFP_KERNEL);
69035+ char *path_exec = NULL;
69036+ char *path_fault = NULL;
69037+ unsigned long start = 0UL, end = 0UL, offset = 0UL;
69038+ siginfo_t info = { };
69039+
69040+ if (buffer_exec && buffer_fault) {
69041+ struct vm_area_struct *vma, *vma_exec = NULL, *vma_fault = NULL;
69042+
69043+ down_read(&mm->mmap_sem);
69044+ vma = mm->mmap;
69045+ while (vma && (!vma_exec || !vma_fault)) {
69046+ if (vma->vm_file && mm->exe_file == vma->vm_file && (vma->vm_flags & VM_EXEC))
69047+ vma_exec = vma;
69048+ if (vma->vm_start <= (unsigned long)pc && (unsigned long)pc < vma->vm_end)
69049+ vma_fault = vma;
69050+ vma = vma->vm_next;
69051+ }
69052+ if (vma_exec)
69053+ path_exec = pax_get_path(&vma_exec->vm_file->f_path, buffer_exec, PAGE_SIZE);
69054+ if (vma_fault) {
69055+ start = vma_fault->vm_start;
69056+ end = vma_fault->vm_end;
69057+ offset = vma_fault->vm_pgoff << PAGE_SHIFT;
69058+ if (vma_fault->vm_file)
69059+ path_fault = pax_get_path(&vma_fault->vm_file->f_path, buffer_fault, PAGE_SIZE);
69060+ else if ((unsigned long)pc >= mm->start_brk && (unsigned long)pc < mm->brk)
69061+ path_fault = "<heap>";
69062+ else if (vma_fault->vm_flags & (VM_GROWSDOWN | VM_GROWSUP))
69063+ path_fault = "<stack>";
69064+ else
69065+ path_fault = "<anonymous mapping>";
69066+ }
69067+ up_read(&mm->mmap_sem);
69068+ }
69069+ if (tsk->signal->curr_ip)
69070+ printk(KERN_ERR "PAX: From %pI4: execution attempt in: %s, %08lx-%08lx %08lx\n", &tsk->signal->curr_ip, path_fault, start, end, offset);
69071+ else
69072+ printk(KERN_ERR "PAX: execution attempt in: %s, %08lx-%08lx %08lx\n", path_fault, start, end, offset);
69073+ printk(KERN_ERR "PAX: terminating task: %s(%s):%d, uid/euid: %u/%u, PC: %p, SP: %p\n", path_exec, tsk->comm, task_pid_nr(tsk),
69074+ from_kuid_munged(&init_user_ns, task_uid(tsk)), from_kuid_munged(&init_user_ns, task_euid(tsk)), pc, sp);
69075+ free_page((unsigned long)buffer_exec);
69076+ free_page((unsigned long)buffer_fault);
69077+ pax_report_insns(regs, pc, sp);
69078+ info.si_signo = SIGKILL;
69079+ info.si_errno = 0;
69080+ info.si_code = SI_KERNEL;
69081+ info.si_pid = 0;
69082+ info.si_uid = 0;
69083+ do_coredump(&info);
69084+}
69085+#endif
69086+
69087+#ifdef CONFIG_PAX_REFCOUNT
69088+void pax_report_refcount_overflow(struct pt_regs *regs)
69089+{
69090+ if (current->signal->curr_ip)
69091+ printk(KERN_ERR "PAX: From %pI4: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n",
69092+ &current->signal->curr_ip, current->comm, task_pid_nr(current),
69093+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()));
69094+ else
69095+ printk(KERN_ERR "PAX: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n", current->comm, task_pid_nr(current),
69096+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()));
69097+ print_symbol(KERN_ERR "PAX: refcount overflow occured at: %s\n", instruction_pointer(regs));
69098+ preempt_disable();
69099+ show_regs(regs);
69100+ preempt_enable();
69101+ force_sig_info(SIGKILL, SEND_SIG_FORCED, current);
69102+}
69103+#endif
69104+
69105+#ifdef CONFIG_PAX_USERCOPY
69106+/* 0: not at all, 1: fully, 2: fully inside frame, -1: partially (implies an error) */
69107+static noinline int check_stack_object(const void *obj, unsigned long len)
69108+{
69109+ const void * const stack = task_stack_page(current);
69110+ const void * const stackend = stack + THREAD_SIZE;
69111+
69112+#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
69113+ const void *frame = NULL;
69114+ const void *oldframe;
69115+#endif
69116+
69117+ if (obj + len < obj)
69118+ return -1;
69119+
69120+ if (obj + len <= stack || stackend <= obj)
69121+ return 0;
69122+
69123+ if (obj < stack || stackend < obj + len)
69124+ return -1;
69125+
69126+#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
69127+ oldframe = __builtin_frame_address(1);
69128+ if (oldframe)
69129+ frame = __builtin_frame_address(2);
69130+ /*
69131+ low ----------------------------------------------> high
69132+ [saved bp][saved ip][args][local vars][saved bp][saved ip]
69133+ ^----------------^
69134+ allow copies only within here
69135+ */
69136+ while (stack <= frame && frame < stackend) {
69137+ /* if obj + len extends past the last frame, this
69138+ check won't pass and the next frame will be 0,
69139+ causing us to bail out and correctly report
69140+ the copy as invalid
69141+ */
69142+ if (obj + len <= frame)
69143+ return obj >= oldframe + 2 * sizeof(void *) ? 2 : -1;
69144+ oldframe = frame;
69145+ frame = *(const void * const *)frame;
69146+ }
69147+ return -1;
69148+#else
69149+ return 1;
69150+#endif
69151+}
69152+
69153+static __noreturn void pax_report_usercopy(const void *ptr, unsigned long len, bool to_user, const char *type)
69154+{
69155+ if (current->signal->curr_ip)
69156+ printk(KERN_ERR "PAX: From %pI4: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
69157+ &current->signal->curr_ip, to_user ? "leak" : "overwrite", to_user ? "from" : "to", ptr, type ? : "unknown", len);
69158+ else
69159+ printk(KERN_ERR "PAX: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
69160+ to_user ? "leak" : "overwrite", to_user ? "from" : "to", ptr, type ? : "unknown", len);
69161+ dump_stack();
69162+ gr_handle_kernel_exploit();
69163+ do_group_exit(SIGKILL);
69164+}
69165+#endif
69166+
69167+#ifdef CONFIG_PAX_USERCOPY
69168+
69169+static inline bool check_kernel_text_object(unsigned long low, unsigned long high)
69170+{
69171+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
69172+ unsigned long textlow = ktla_ktva((unsigned long)_stext);
69173+#ifdef CONFIG_MODULES
69174+ unsigned long texthigh = (unsigned long)MODULES_EXEC_VADDR;
69175+#else
69176+ unsigned long texthigh = ktla_ktva((unsigned long)_etext);
69177+#endif
69178+
69179+#else
69180+ unsigned long textlow = (unsigned long)_stext;
69181+ unsigned long texthigh = (unsigned long)_etext;
69182+
69183+#ifdef CONFIG_X86_64
69184+ /* check against linear mapping as well */
69185+ if (high > (unsigned long)__va(__pa(textlow)) &&
69186+ low < (unsigned long)__va(__pa(texthigh)))
69187+ return true;
69188+#endif
69189+
69190+#endif
69191+
69192+ if (high <= textlow || low >= texthigh)
69193+ return false;
69194+ else
69195+ return true;
69196+}
69197+#endif
69198+
69199+void __check_object_size(const void *ptr, unsigned long n, bool to_user, bool const_size)
69200+{
69201+#ifdef CONFIG_PAX_USERCOPY
69202+ const char *type;
69203+#endif
69204+
69205+#if !defined(CONFIG_STACK_GROWSUP) && !defined(CONFIG_X86_64)
69206+ unsigned long stackstart = (unsigned long)task_stack_page(current);
69207+ unsigned long currentsp = (unsigned long)&stackstart;
69208+ if (unlikely((currentsp < stackstart + 512 ||
69209+ currentsp >= stackstart + THREAD_SIZE) && !in_interrupt()))
69210+ BUG();
69211+#endif
69212+
69213+#ifndef CONFIG_PAX_USERCOPY_DEBUG
69214+ if (const_size)
69215+ return;
69216+#endif
69217+
69218+#ifdef CONFIG_PAX_USERCOPY
69219+ if (!n)
69220+ return;
69221+
69222+ type = check_heap_object(ptr, n);
69223+ if (!type) {
69224+ int ret = check_stack_object(ptr, n);
69225+ if (ret == 1 || ret == 2)
69226+ return;
69227+ if (ret == 0) {
69228+ if (check_kernel_text_object((unsigned long)ptr, (unsigned long)ptr + n))
69229+ type = "<kernel text>";
69230+ else
69231+ return;
69232+ } else
69233+ type = "<process stack>";
69234+ }
69235+
69236+ pax_report_usercopy(ptr, n, to_user, type);
69237+#endif
69238+
69239+}
69240+EXPORT_SYMBOL(__check_object_size);
69241+
69242+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
69243+void pax_track_stack(void)
69244+{
69245+ unsigned long sp = (unsigned long)&sp;
69246+ if (sp < current_thread_info()->lowest_stack &&
69247+ sp >= (unsigned long)task_stack_page(current) + 2 * sizeof(unsigned long))
69248+ current_thread_info()->lowest_stack = sp;
69249+ if (unlikely((sp & ~(THREAD_SIZE - 1)) < (THREAD_SIZE/16)))
69250+ BUG();
69251+}
69252+EXPORT_SYMBOL(pax_track_stack);
69253+#endif
69254+
69255+#ifdef CONFIG_PAX_SIZE_OVERFLOW
69256+void report_size_overflow(const char *file, unsigned int line, const char *func, const char *ssa_name)
69257+{
69258+ printk(KERN_ERR "PAX: size overflow detected in function %s %s:%u %s", func, file, line, ssa_name);
69259+ dump_stack();
69260+ do_group_exit(SIGKILL);
69261+}
69262+EXPORT_SYMBOL(report_size_overflow);
69263+#endif
69264diff --git a/fs/ext2/balloc.c b/fs/ext2/balloc.c
69265index 9f9992b..8b59411 100644
69266--- a/fs/ext2/balloc.c
69267+++ b/fs/ext2/balloc.c
69268@@ -1184,10 +1184,10 @@ static int ext2_has_free_blocks(struct ext2_sb_info *sbi)
69269
69270 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
69271 root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
69272- if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
69273+ if (free_blocks < root_blocks + 1 &&
69274 !uid_eq(sbi->s_resuid, current_fsuid()) &&
69275 (gid_eq(sbi->s_resgid, GLOBAL_ROOT_GID) ||
69276- !in_group_p (sbi->s_resgid))) {
69277+ !in_group_p (sbi->s_resgid)) && !capable_nolog(CAP_SYS_RESOURCE)) {
69278 return 0;
69279 }
69280 return 1;
69281diff --git a/fs/ext2/super.c b/fs/ext2/super.c
69282index d0e746e..82e06f0 100644
69283--- a/fs/ext2/super.c
69284+++ b/fs/ext2/super.c
69285@@ -267,10 +267,8 @@ static int ext2_show_options(struct seq_file *seq, struct dentry *root)
69286 #ifdef CONFIG_EXT2_FS_XATTR
69287 if (test_opt(sb, XATTR_USER))
69288 seq_puts(seq, ",user_xattr");
69289- if (!test_opt(sb, XATTR_USER) &&
69290- (def_mount_opts & EXT2_DEFM_XATTR_USER)) {
69291+ if (!test_opt(sb, XATTR_USER))
69292 seq_puts(seq, ",nouser_xattr");
69293- }
69294 #endif
69295
69296 #ifdef CONFIG_EXT2_FS_POSIX_ACL
69297@@ -856,8 +854,8 @@ static int ext2_fill_super(struct super_block *sb, void *data, int silent)
69298 if (def_mount_opts & EXT2_DEFM_UID16)
69299 set_opt(sbi->s_mount_opt, NO_UID32);
69300 #ifdef CONFIG_EXT2_FS_XATTR
69301- if (def_mount_opts & EXT2_DEFM_XATTR_USER)
69302- set_opt(sbi->s_mount_opt, XATTR_USER);
69303+ /* always enable user xattrs */
69304+ set_opt(sbi->s_mount_opt, XATTR_USER);
69305 #endif
69306 #ifdef CONFIG_EXT2_FS_POSIX_ACL
69307 if (def_mount_opts & EXT2_DEFM_ACL)
69308diff --git a/fs/ext2/xattr.c b/fs/ext2/xattr.c
69309index 9142614..97484fa 100644
69310--- a/fs/ext2/xattr.c
69311+++ b/fs/ext2/xattr.c
69312@@ -247,7 +247,7 @@ ext2_xattr_list(struct dentry *dentry, char *buffer, size_t buffer_size)
69313 struct buffer_head *bh = NULL;
69314 struct ext2_xattr_entry *entry;
69315 char *end;
69316- size_t rest = buffer_size;
69317+ size_t rest = buffer_size, total_size = 0;
69318 int error;
69319
69320 ea_idebug(inode, "buffer=%p, buffer_size=%ld",
69321@@ -305,9 +305,10 @@ bad_block: ext2_error(inode->i_sb, "ext2_xattr_list",
69322 buffer += size;
69323 }
69324 rest -= size;
69325+ total_size += size;
69326 }
69327 }
69328- error = buffer_size - rest; /* total size */
69329+ error = total_size;
69330
69331 cleanup:
69332 brelse(bh);
69333diff --git a/fs/ext3/balloc.c b/fs/ext3/balloc.c
69334index 158b5d4..2432610 100644
69335--- a/fs/ext3/balloc.c
69336+++ b/fs/ext3/balloc.c
69337@@ -1438,10 +1438,10 @@ static int ext3_has_free_blocks(struct ext3_sb_info *sbi, int use_reservation)
69338
69339 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
69340 root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
69341- if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
69342+ if (free_blocks < root_blocks + 1 &&
69343 !use_reservation && !uid_eq(sbi->s_resuid, current_fsuid()) &&
69344 (gid_eq(sbi->s_resgid, GLOBAL_ROOT_GID) ||
69345- !in_group_p (sbi->s_resgid))) {
69346+ !in_group_p (sbi->s_resgid)) && !capable_nolog(CAP_SYS_RESOURCE)) {
69347 return 0;
69348 }
69349 return 1;
69350diff --git a/fs/ext3/super.c b/fs/ext3/super.c
69351index d4dbf3c..906a6fb 100644
69352--- a/fs/ext3/super.c
69353+++ b/fs/ext3/super.c
69354@@ -655,10 +655,8 @@ static int ext3_show_options(struct seq_file *seq, struct dentry *root)
69355 #ifdef CONFIG_EXT3_FS_XATTR
69356 if (test_opt(sb, XATTR_USER))
69357 seq_puts(seq, ",user_xattr");
69358- if (!test_opt(sb, XATTR_USER) &&
69359- (def_mount_opts & EXT3_DEFM_XATTR_USER)) {
69360+ if (!test_opt(sb, XATTR_USER))
69361 seq_puts(seq, ",nouser_xattr");
69362- }
69363 #endif
69364 #ifdef CONFIG_EXT3_FS_POSIX_ACL
69365 if (test_opt(sb, POSIX_ACL))
69366@@ -1760,8 +1758,8 @@ static int ext3_fill_super (struct super_block *sb, void *data, int silent)
69367 if (def_mount_opts & EXT3_DEFM_UID16)
69368 set_opt(sbi->s_mount_opt, NO_UID32);
69369 #ifdef CONFIG_EXT3_FS_XATTR
69370- if (def_mount_opts & EXT3_DEFM_XATTR_USER)
69371- set_opt(sbi->s_mount_opt, XATTR_USER);
69372+ /* always enable user xattrs */
69373+ set_opt(sbi->s_mount_opt, XATTR_USER);
69374 #endif
69375 #ifdef CONFIG_EXT3_FS_POSIX_ACL
69376 if (def_mount_opts & EXT3_DEFM_ACL)
69377diff --git a/fs/ext3/xattr.c b/fs/ext3/xattr.c
69378index c6874be..f8a6ae8 100644
69379--- a/fs/ext3/xattr.c
69380+++ b/fs/ext3/xattr.c
69381@@ -330,7 +330,7 @@ static int
69382 ext3_xattr_list_entries(struct dentry *dentry, struct ext3_xattr_entry *entry,
69383 char *buffer, size_t buffer_size)
69384 {
69385- size_t rest = buffer_size;
69386+ size_t rest = buffer_size, total_size = 0;
69387
69388 for (; !IS_LAST_ENTRY(entry); entry = EXT3_XATTR_NEXT(entry)) {
69389 const struct xattr_handler *handler =
69390@@ -347,9 +347,10 @@ ext3_xattr_list_entries(struct dentry *dentry, struct ext3_xattr_entry *entry,
69391 buffer += size;
69392 }
69393 rest -= size;
69394+ total_size += size;
69395 }
69396 }
69397- return buffer_size - rest;
69398+ return total_size;
69399 }
69400
69401 static int
69402diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c
69403index 83a6f49..d4e4d03 100644
69404--- a/fs/ext4/balloc.c
69405+++ b/fs/ext4/balloc.c
69406@@ -557,8 +557,8 @@ static int ext4_has_free_clusters(struct ext4_sb_info *sbi,
69407 /* Hm, nope. Are (enough) root reserved clusters available? */
69408 if (uid_eq(sbi->s_resuid, current_fsuid()) ||
69409 (!gid_eq(sbi->s_resgid, GLOBAL_ROOT_GID) && in_group_p(sbi->s_resgid)) ||
69410- capable(CAP_SYS_RESOURCE) ||
69411- (flags & EXT4_MB_USE_ROOT_BLOCKS)) {
69412+ (flags & EXT4_MB_USE_ROOT_BLOCKS) ||
69413+ capable_nolog(CAP_SYS_RESOURCE)) {
69414
69415 if (free_clusters >= (nclusters + dirty_clusters +
69416 resv_clusters))
69417diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
69418index f63c3d5..3c1a033 100644
69419--- a/fs/ext4/ext4.h
69420+++ b/fs/ext4/ext4.h
69421@@ -1287,19 +1287,19 @@ struct ext4_sb_info {
69422 unsigned long s_mb_last_start;
69423
69424 /* stats for buddy allocator */
69425- atomic_t s_bal_reqs; /* number of reqs with len > 1 */
69426- atomic_t s_bal_success; /* we found long enough chunks */
69427- atomic_t s_bal_allocated; /* in blocks */
69428- atomic_t s_bal_ex_scanned; /* total extents scanned */
69429- atomic_t s_bal_goals; /* goal hits */
69430- atomic_t s_bal_breaks; /* too long searches */
69431- atomic_t s_bal_2orders; /* 2^order hits */
69432+ atomic_unchecked_t s_bal_reqs; /* number of reqs with len > 1 */
69433+ atomic_unchecked_t s_bal_success; /* we found long enough chunks */
69434+ atomic_unchecked_t s_bal_allocated; /* in blocks */
69435+ atomic_unchecked_t s_bal_ex_scanned; /* total extents scanned */
69436+ atomic_unchecked_t s_bal_goals; /* goal hits */
69437+ atomic_unchecked_t s_bal_breaks; /* too long searches */
69438+ atomic_unchecked_t s_bal_2orders; /* 2^order hits */
69439 spinlock_t s_bal_lock;
69440 unsigned long s_mb_buddies_generated;
69441 unsigned long long s_mb_generation_time;
69442- atomic_t s_mb_lost_chunks;
69443- atomic_t s_mb_preallocated;
69444- atomic_t s_mb_discarded;
69445+ atomic_unchecked_t s_mb_lost_chunks;
69446+ atomic_unchecked_t s_mb_preallocated;
69447+ atomic_unchecked_t s_mb_discarded;
69448 atomic_t s_lock_busy;
69449
69450 /* locality groups */
69451diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
69452index 8d1e602..abf497b 100644
69453--- a/fs/ext4/mballoc.c
69454+++ b/fs/ext4/mballoc.c
69455@@ -1901,7 +1901,7 @@ void ext4_mb_simple_scan_group(struct ext4_allocation_context *ac,
69456 BUG_ON(ac->ac_b_ex.fe_len != ac->ac_g_ex.fe_len);
69457
69458 if (EXT4_SB(sb)->s_mb_stats)
69459- atomic_inc(&EXT4_SB(sb)->s_bal_2orders);
69460+ atomic_inc_unchecked(&EXT4_SB(sb)->s_bal_2orders);
69461
69462 break;
69463 }
69464@@ -2211,7 +2211,7 @@ repeat:
69465 ac->ac_status = AC_STATUS_CONTINUE;
69466 ac->ac_flags |= EXT4_MB_HINT_FIRST;
69467 cr = 3;
69468- atomic_inc(&sbi->s_mb_lost_chunks);
69469+ atomic_inc_unchecked(&sbi->s_mb_lost_chunks);
69470 goto repeat;
69471 }
69472 }
69473@@ -2716,25 +2716,25 @@ int ext4_mb_release(struct super_block *sb)
69474 if (sbi->s_mb_stats) {
69475 ext4_msg(sb, KERN_INFO,
69476 "mballoc: %u blocks %u reqs (%u success)",
69477- atomic_read(&sbi->s_bal_allocated),
69478- atomic_read(&sbi->s_bal_reqs),
69479- atomic_read(&sbi->s_bal_success));
69480+ atomic_read_unchecked(&sbi->s_bal_allocated),
69481+ atomic_read_unchecked(&sbi->s_bal_reqs),
69482+ atomic_read_unchecked(&sbi->s_bal_success));
69483 ext4_msg(sb, KERN_INFO,
69484 "mballoc: %u extents scanned, %u goal hits, "
69485 "%u 2^N hits, %u breaks, %u lost",
69486- atomic_read(&sbi->s_bal_ex_scanned),
69487- atomic_read(&sbi->s_bal_goals),
69488- atomic_read(&sbi->s_bal_2orders),
69489- atomic_read(&sbi->s_bal_breaks),
69490- atomic_read(&sbi->s_mb_lost_chunks));
69491+ atomic_read_unchecked(&sbi->s_bal_ex_scanned),
69492+ atomic_read_unchecked(&sbi->s_bal_goals),
69493+ atomic_read_unchecked(&sbi->s_bal_2orders),
69494+ atomic_read_unchecked(&sbi->s_bal_breaks),
69495+ atomic_read_unchecked(&sbi->s_mb_lost_chunks));
69496 ext4_msg(sb, KERN_INFO,
69497 "mballoc: %lu generated and it took %Lu",
69498 sbi->s_mb_buddies_generated,
69499 sbi->s_mb_generation_time);
69500 ext4_msg(sb, KERN_INFO,
69501 "mballoc: %u preallocated, %u discarded",
69502- atomic_read(&sbi->s_mb_preallocated),
69503- atomic_read(&sbi->s_mb_discarded));
69504+ atomic_read_unchecked(&sbi->s_mb_preallocated),
69505+ atomic_read_unchecked(&sbi->s_mb_discarded));
69506 }
69507
69508 free_percpu(sbi->s_locality_groups);
69509@@ -3190,16 +3190,16 @@ static void ext4_mb_collect_stats(struct ext4_allocation_context *ac)
69510 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
69511
69512 if (sbi->s_mb_stats && ac->ac_g_ex.fe_len > 1) {
69513- atomic_inc(&sbi->s_bal_reqs);
69514- atomic_add(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
69515+ atomic_inc_unchecked(&sbi->s_bal_reqs);
69516+ atomic_add_unchecked(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
69517 if (ac->ac_b_ex.fe_len >= ac->ac_o_ex.fe_len)
69518- atomic_inc(&sbi->s_bal_success);
69519- atomic_add(ac->ac_found, &sbi->s_bal_ex_scanned);
69520+ atomic_inc_unchecked(&sbi->s_bal_success);
69521+ atomic_add_unchecked(ac->ac_found, &sbi->s_bal_ex_scanned);
69522 if (ac->ac_g_ex.fe_start == ac->ac_b_ex.fe_start &&
69523 ac->ac_g_ex.fe_group == ac->ac_b_ex.fe_group)
69524- atomic_inc(&sbi->s_bal_goals);
69525+ atomic_inc_unchecked(&sbi->s_bal_goals);
69526 if (ac->ac_found > sbi->s_mb_max_to_scan)
69527- atomic_inc(&sbi->s_bal_breaks);
69528+ atomic_inc_unchecked(&sbi->s_bal_breaks);
69529 }
69530
69531 if (ac->ac_op == EXT4_MB_HISTORY_ALLOC)
69532@@ -3626,7 +3626,7 @@ ext4_mb_new_inode_pa(struct ext4_allocation_context *ac)
69533 trace_ext4_mb_new_inode_pa(ac, pa);
69534
69535 ext4_mb_use_inode_pa(ac, pa);
69536- atomic_add(pa->pa_free, &sbi->s_mb_preallocated);
69537+ atomic_add_unchecked(pa->pa_free, &sbi->s_mb_preallocated);
69538
69539 ei = EXT4_I(ac->ac_inode);
69540 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
69541@@ -3686,7 +3686,7 @@ ext4_mb_new_group_pa(struct ext4_allocation_context *ac)
69542 trace_ext4_mb_new_group_pa(ac, pa);
69543
69544 ext4_mb_use_group_pa(ac, pa);
69545- atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
69546+ atomic_add_unchecked(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
69547
69548 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
69549 lg = ac->ac_lg;
69550@@ -3775,7 +3775,7 @@ ext4_mb_release_inode_pa(struct ext4_buddy *e4b, struct buffer_head *bitmap_bh,
69551 * from the bitmap and continue.
69552 */
69553 }
69554- atomic_add(free, &sbi->s_mb_discarded);
69555+ atomic_add_unchecked(free, &sbi->s_mb_discarded);
69556
69557 return err;
69558 }
69559@@ -3793,7 +3793,7 @@ ext4_mb_release_group_pa(struct ext4_buddy *e4b,
69560 ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit);
69561 BUG_ON(group != e4b->bd_group && pa->pa_len != 0);
69562 mb_free_blocks(pa->pa_inode, e4b, bit, pa->pa_len);
69563- atomic_add(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
69564+ atomic_add_unchecked(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
69565 trace_ext4_mballoc_discard(sb, NULL, group, bit, pa->pa_len);
69566
69567 return 0;
69568diff --git a/fs/ext4/mmp.c b/fs/ext4/mmp.c
69569index 8313ca3..8a37d08 100644
69570--- a/fs/ext4/mmp.c
69571+++ b/fs/ext4/mmp.c
69572@@ -111,7 +111,7 @@ static int read_mmp_block(struct super_block *sb, struct buffer_head **bh,
69573 void __dump_mmp_msg(struct super_block *sb, struct mmp_struct *mmp,
69574 const char *function, unsigned int line, const char *msg)
69575 {
69576- __ext4_warning(sb, function, line, msg);
69577+ __ext4_warning(sb, function, line, "%s", msg);
69578 __ext4_warning(sb, function, line,
69579 "MMP failure info: last update time: %llu, last update "
69580 "node: %s, last update device: %s\n",
69581diff --git a/fs/ext4/resize.c b/fs/ext4/resize.c
69582index 8a8ec62..1b02de5 100644
69583--- a/fs/ext4/resize.c
69584+++ b/fs/ext4/resize.c
69585@@ -413,7 +413,7 @@ static int set_flexbg_block_bitmap(struct super_block *sb, handle_t *handle,
69586
69587 ext4_debug("mark blocks [%llu/%u] used\n", block, count);
69588 for (count2 = count; count > 0; count -= count2, block += count2) {
69589- ext4_fsblk_t start;
69590+ ext4_fsblk_t start, diff;
69591 struct buffer_head *bh;
69592 ext4_group_t group;
69593 int err;
69594@@ -422,10 +422,6 @@ static int set_flexbg_block_bitmap(struct super_block *sb, handle_t *handle,
69595 start = ext4_group_first_block_no(sb, group);
69596 group -= flex_gd->groups[0].group;
69597
69598- count2 = EXT4_BLOCKS_PER_GROUP(sb) - (block - start);
69599- if (count2 > count)
69600- count2 = count;
69601-
69602 if (flex_gd->bg_flags[group] & EXT4_BG_BLOCK_UNINIT) {
69603 BUG_ON(flex_gd->count > 1);
69604 continue;
69605@@ -443,9 +439,15 @@ static int set_flexbg_block_bitmap(struct super_block *sb, handle_t *handle,
69606 err = ext4_journal_get_write_access(handle, bh);
69607 if (err)
69608 return err;
69609+
69610+ diff = block - start;
69611+ count2 = EXT4_BLOCKS_PER_GROUP(sb) - diff;
69612+ if (count2 > count)
69613+ count2 = count;
69614+
69615 ext4_debug("mark block bitmap %#04llx (+%llu/%u)\n", block,
69616- block - start, count2);
69617- ext4_set_bits(bh->b_data, block - start, count2);
69618+ diff, count2);
69619+ ext4_set_bits(bh->b_data, diff, count2);
69620
69621 err = ext4_handle_dirty_metadata(handle, NULL, bh);
69622 if (unlikely(err))
69623diff --git a/fs/ext4/super.c b/fs/ext4/super.c
69624index e061e66..87bc092 100644
69625--- a/fs/ext4/super.c
69626+++ b/fs/ext4/super.c
69627@@ -1243,7 +1243,7 @@ static ext4_fsblk_t get_sb_block(void **data)
69628 }
69629
69630 #define DEFAULT_JOURNAL_IOPRIO (IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, 3))
69631-static char deprecated_msg[] = "Mount option \"%s\" will be removed by %s\n"
69632+static const char deprecated_msg[] = "Mount option \"%s\" will be removed by %s\n"
69633 "Contact linux-ext4@vger.kernel.org if you think we should keep it.\n";
69634
69635 #ifdef CONFIG_QUOTA
69636@@ -2443,7 +2443,7 @@ struct ext4_attr {
69637 int offset;
69638 int deprecated_val;
69639 } u;
69640-};
69641+} __do_const;
69642
69643 static int parse_strtoull(const char *buf,
69644 unsigned long long max, unsigned long long *value)
69645diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c
69646index 1e09fc7..0400dd4 100644
69647--- a/fs/ext4/xattr.c
69648+++ b/fs/ext4/xattr.c
69649@@ -399,7 +399,7 @@ static int
69650 ext4_xattr_list_entries(struct dentry *dentry, struct ext4_xattr_entry *entry,
69651 char *buffer, size_t buffer_size)
69652 {
69653- size_t rest = buffer_size;
69654+ size_t rest = buffer_size, total_size = 0;
69655
69656 for (; !IS_LAST_ENTRY(entry); entry = EXT4_XATTR_NEXT(entry)) {
69657 const struct xattr_handler *handler =
69658@@ -416,9 +416,10 @@ ext4_xattr_list_entries(struct dentry *dentry, struct ext4_xattr_entry *entry,
69659 buffer += size;
69660 }
69661 rest -= size;
69662+ total_size += size;
69663 }
69664 }
69665- return buffer_size - rest;
69666+ return total_size;
69667 }
69668
69669 static int
69670diff --git a/fs/fcntl.c b/fs/fcntl.c
69671index ee85cd4..9dd0d20 100644
69672--- a/fs/fcntl.c
69673+++ b/fs/fcntl.c
69674@@ -102,6 +102,10 @@ void __f_setown(struct file *filp, struct pid *pid, enum pid_type type,
69675 int force)
69676 {
69677 security_file_set_fowner(filp);
69678+ if (gr_handle_chroot_fowner(pid, type))
69679+ return;
69680+ if (gr_check_protected_task_fowner(pid, type))
69681+ return;
69682 f_modown(filp, pid, type, force);
69683 }
69684 EXPORT_SYMBOL(__f_setown);
69685diff --git a/fs/fhandle.c b/fs/fhandle.c
69686index 999ff5c..2281df9 100644
69687--- a/fs/fhandle.c
69688+++ b/fs/fhandle.c
69689@@ -8,6 +8,7 @@
69690 #include <linux/fs_struct.h>
69691 #include <linux/fsnotify.h>
69692 #include <linux/personality.h>
69693+#include <linux/grsecurity.h>
69694 #include <asm/uaccess.h>
69695 #include "internal.h"
69696 #include "mount.h"
69697@@ -67,8 +68,7 @@ static long do_sys_name_to_handle(struct path *path,
69698 } else
69699 retval = 0;
69700 /* copy the mount id */
69701- if (copy_to_user(mnt_id, &real_mount(path->mnt)->mnt_id,
69702- sizeof(*mnt_id)) ||
69703+ if (put_user(real_mount(path->mnt)->mnt_id, mnt_id) ||
69704 copy_to_user(ufh, handle,
69705 sizeof(struct file_handle) + handle_bytes))
69706 retval = -EFAULT;
69707@@ -175,7 +175,7 @@ static int handle_to_path(int mountdirfd, struct file_handle __user *ufh,
69708 * the directory. Ideally we would like CAP_DAC_SEARCH.
69709 * But we don't have that
69710 */
69711- if (!capable(CAP_DAC_READ_SEARCH)) {
69712+ if (!capable(CAP_DAC_READ_SEARCH) || !gr_chroot_fhandle()) {
69713 retval = -EPERM;
69714 goto out_err;
69715 }
69716@@ -195,8 +195,9 @@ static int handle_to_path(int mountdirfd, struct file_handle __user *ufh,
69717 goto out_err;
69718 }
69719 /* copy the full handle */
69720- if (copy_from_user(handle, ufh,
69721- sizeof(struct file_handle) +
69722+ *handle = f_handle;
69723+ if (copy_from_user(&handle->f_handle,
69724+ &ufh->f_handle,
69725 f_handle.handle_bytes)) {
69726 retval = -EFAULT;
69727 goto out_handle;
69728diff --git a/fs/file.c b/fs/file.c
69729index ee738ea..f6c15629 100644
69730--- a/fs/file.c
69731+++ b/fs/file.c
69732@@ -16,6 +16,7 @@
69733 #include <linux/slab.h>
69734 #include <linux/vmalloc.h>
69735 #include <linux/file.h>
69736+#include <linux/security.h>
69737 #include <linux/fdtable.h>
69738 #include <linux/bitops.h>
69739 #include <linux/interrupt.h>
69740@@ -139,7 +140,7 @@ out:
69741 * Return <0 error code on error; 1 on successful completion.
69742 * The files->file_lock should be held on entry, and will be held on exit.
69743 */
69744-static int expand_fdtable(struct files_struct *files, int nr)
69745+static int expand_fdtable(struct files_struct *files, unsigned int nr)
69746 __releases(files->file_lock)
69747 __acquires(files->file_lock)
69748 {
69749@@ -184,7 +185,7 @@ static int expand_fdtable(struct files_struct *files, int nr)
69750 * expanded and execution may have blocked.
69751 * The files->file_lock should be held on entry, and will be held on exit.
69752 */
69753-static int expand_files(struct files_struct *files, int nr)
69754+static int expand_files(struct files_struct *files, unsigned int nr)
69755 {
69756 struct fdtable *fdt;
69757
69758@@ -800,6 +801,7 @@ int replace_fd(unsigned fd, struct file *file, unsigned flags)
69759 if (!file)
69760 return __close_fd(files, fd);
69761
69762+ gr_learn_resource(current, RLIMIT_NOFILE, fd, 0);
69763 if (fd >= rlimit(RLIMIT_NOFILE))
69764 return -EBADF;
69765
69766@@ -826,6 +828,7 @@ SYSCALL_DEFINE3(dup3, unsigned int, oldfd, unsigned int, newfd, int, flags)
69767 if (unlikely(oldfd == newfd))
69768 return -EINVAL;
69769
69770+ gr_learn_resource(current, RLIMIT_NOFILE, newfd, 0);
69771 if (newfd >= rlimit(RLIMIT_NOFILE))
69772 return -EBADF;
69773
69774@@ -881,6 +884,7 @@ SYSCALL_DEFINE1(dup, unsigned int, fildes)
69775 int f_dupfd(unsigned int from, struct file *file, unsigned flags)
69776 {
69777 int err;
69778+ gr_learn_resource(current, RLIMIT_NOFILE, from, 0);
69779 if (from >= rlimit(RLIMIT_NOFILE))
69780 return -EINVAL;
69781 err = alloc_fd(from, flags);
69782diff --git a/fs/filesystems.c b/fs/filesystems.c
69783index 5797d45..7d7d79a 100644
69784--- a/fs/filesystems.c
69785+++ b/fs/filesystems.c
69786@@ -275,7 +275,11 @@ struct file_system_type *get_fs_type(const char *name)
69787 int len = dot ? dot - name : strlen(name);
69788
69789 fs = __get_fs_type(name, len);
69790+#ifdef CONFIG_GRKERNSEC_MODHARDEN
69791+ if (!fs && (___request_module(true, "grsec_modharden_fs", "fs-%.*s", len, name) == 0))
69792+#else
69793 if (!fs && (request_module("fs-%.*s", len, name) == 0))
69794+#endif
69795 fs = __get_fs_type(name, len);
69796
69797 if (dot && fs && !(fs->fs_flags & FS_HAS_SUBTYPE)) {
69798diff --git a/fs/fs_struct.c b/fs/fs_struct.c
69799index 7dca743..1ff87ae 100644
69800--- a/fs/fs_struct.c
69801+++ b/fs/fs_struct.c
69802@@ -4,6 +4,7 @@
69803 #include <linux/path.h>
69804 #include <linux/slab.h>
69805 #include <linux/fs_struct.h>
69806+#include <linux/grsecurity.h>
69807 #include "internal.h"
69808
69809 /*
69810@@ -15,14 +16,18 @@ void set_fs_root(struct fs_struct *fs, const struct path *path)
69811 struct path old_root;
69812
69813 path_get(path);
69814+ gr_inc_chroot_refcnts(path->dentry, path->mnt);
69815 spin_lock(&fs->lock);
69816 write_seqcount_begin(&fs->seq);
69817 old_root = fs->root;
69818 fs->root = *path;
69819+ gr_set_chroot_entries(current, path);
69820 write_seqcount_end(&fs->seq);
69821 spin_unlock(&fs->lock);
69822- if (old_root.dentry)
69823+ if (old_root.dentry) {
69824+ gr_dec_chroot_refcnts(old_root.dentry, old_root.mnt);
69825 path_put(&old_root);
69826+ }
69827 }
69828
69829 /*
69830@@ -67,6 +72,10 @@ void chroot_fs_refs(const struct path *old_root, const struct path *new_root)
69831 int hits = 0;
69832 spin_lock(&fs->lock);
69833 write_seqcount_begin(&fs->seq);
69834+ /* this root replacement is only done by pivot_root,
69835+ leave grsec's chroot tagging alone for this task
69836+ so that a pivoted root isn't treated as a chroot
69837+ */
69838 hits += replace_path(&fs->root, old_root, new_root);
69839 hits += replace_path(&fs->pwd, old_root, new_root);
69840 write_seqcount_end(&fs->seq);
69841@@ -85,6 +94,7 @@ void chroot_fs_refs(const struct path *old_root, const struct path *new_root)
69842
69843 void free_fs_struct(struct fs_struct *fs)
69844 {
69845+ gr_dec_chroot_refcnts(fs->root.dentry, fs->root.mnt);
69846 path_put(&fs->root);
69847 path_put(&fs->pwd);
69848 kmem_cache_free(fs_cachep, fs);
69849@@ -99,7 +109,8 @@ void exit_fs(struct task_struct *tsk)
69850 task_lock(tsk);
69851 spin_lock(&fs->lock);
69852 tsk->fs = NULL;
69853- kill = !--fs->users;
69854+ gr_clear_chroot_entries(tsk);
69855+ kill = !atomic_dec_return(&fs->users);
69856 spin_unlock(&fs->lock);
69857 task_unlock(tsk);
69858 if (kill)
69859@@ -112,7 +123,7 @@ struct fs_struct *copy_fs_struct(struct fs_struct *old)
69860 struct fs_struct *fs = kmem_cache_alloc(fs_cachep, GFP_KERNEL);
69861 /* We don't need to lock fs - think why ;-) */
69862 if (fs) {
69863- fs->users = 1;
69864+ atomic_set(&fs->users, 1);
69865 fs->in_exec = 0;
69866 spin_lock_init(&fs->lock);
69867 seqcount_init(&fs->seq);
69868@@ -121,9 +132,13 @@ struct fs_struct *copy_fs_struct(struct fs_struct *old)
69869 spin_lock(&old->lock);
69870 fs->root = old->root;
69871 path_get(&fs->root);
69872+ /* instead of calling gr_set_chroot_entries here,
69873+ we call it from every caller of this function
69874+ */
69875 fs->pwd = old->pwd;
69876 path_get(&fs->pwd);
69877 spin_unlock(&old->lock);
69878+ gr_inc_chroot_refcnts(fs->root.dentry, fs->root.mnt);
69879 }
69880 return fs;
69881 }
69882@@ -139,8 +154,9 @@ int unshare_fs_struct(void)
69883
69884 task_lock(current);
69885 spin_lock(&fs->lock);
69886- kill = !--fs->users;
69887+ kill = !atomic_dec_return(&fs->users);
69888 current->fs = new_fs;
69889+ gr_set_chroot_entries(current, &new_fs->root);
69890 spin_unlock(&fs->lock);
69891 task_unlock(current);
69892
69893@@ -153,13 +169,13 @@ EXPORT_SYMBOL_GPL(unshare_fs_struct);
69894
69895 int current_umask(void)
69896 {
69897- return current->fs->umask;
69898+ return current->fs->umask | gr_acl_umask();
69899 }
69900 EXPORT_SYMBOL(current_umask);
69901
69902 /* to be mentioned only in INIT_TASK */
69903 struct fs_struct init_fs = {
69904- .users = 1,
69905+ .users = ATOMIC_INIT(1),
69906 .lock = __SPIN_LOCK_UNLOCKED(init_fs.lock),
69907 .seq = SEQCNT_ZERO(init_fs.seq),
69908 .umask = 0022,
69909diff --git a/fs/fscache/cookie.c b/fs/fscache/cookie.c
69910index 89acec7..a575262 100644
69911--- a/fs/fscache/cookie.c
69912+++ b/fs/fscache/cookie.c
69913@@ -19,7 +19,7 @@
69914
69915 struct kmem_cache *fscache_cookie_jar;
69916
69917-static atomic_t fscache_object_debug_id = ATOMIC_INIT(0);
69918+static atomic_unchecked_t fscache_object_debug_id = ATOMIC_INIT(0);
69919
69920 static int fscache_acquire_non_index_cookie(struct fscache_cookie *cookie);
69921 static int fscache_alloc_object(struct fscache_cache *cache,
69922@@ -69,11 +69,11 @@ struct fscache_cookie *__fscache_acquire_cookie(
69923 parent ? (char *) parent->def->name : "<no-parent>",
69924 def->name, netfs_data, enable);
69925
69926- fscache_stat(&fscache_n_acquires);
69927+ fscache_stat_unchecked(&fscache_n_acquires);
69928
69929 /* if there's no parent cookie, then we don't create one here either */
69930 if (!parent) {
69931- fscache_stat(&fscache_n_acquires_null);
69932+ fscache_stat_unchecked(&fscache_n_acquires_null);
69933 _leave(" [no parent]");
69934 return NULL;
69935 }
69936@@ -88,7 +88,7 @@ struct fscache_cookie *__fscache_acquire_cookie(
69937 /* allocate and initialise a cookie */
69938 cookie = kmem_cache_alloc(fscache_cookie_jar, GFP_KERNEL);
69939 if (!cookie) {
69940- fscache_stat(&fscache_n_acquires_oom);
69941+ fscache_stat_unchecked(&fscache_n_acquires_oom);
69942 _leave(" [ENOMEM]");
69943 return NULL;
69944 }
69945@@ -115,13 +115,13 @@ struct fscache_cookie *__fscache_acquire_cookie(
69946
69947 switch (cookie->def->type) {
69948 case FSCACHE_COOKIE_TYPE_INDEX:
69949- fscache_stat(&fscache_n_cookie_index);
69950+ fscache_stat_unchecked(&fscache_n_cookie_index);
69951 break;
69952 case FSCACHE_COOKIE_TYPE_DATAFILE:
69953- fscache_stat(&fscache_n_cookie_data);
69954+ fscache_stat_unchecked(&fscache_n_cookie_data);
69955 break;
69956 default:
69957- fscache_stat(&fscache_n_cookie_special);
69958+ fscache_stat_unchecked(&fscache_n_cookie_special);
69959 break;
69960 }
69961
69962@@ -135,7 +135,7 @@ struct fscache_cookie *__fscache_acquire_cookie(
69963 } else {
69964 atomic_dec(&parent->n_children);
69965 __fscache_cookie_put(cookie);
69966- fscache_stat(&fscache_n_acquires_nobufs);
69967+ fscache_stat_unchecked(&fscache_n_acquires_nobufs);
69968 _leave(" = NULL");
69969 return NULL;
69970 }
69971@@ -144,7 +144,7 @@ struct fscache_cookie *__fscache_acquire_cookie(
69972 }
69973 }
69974
69975- fscache_stat(&fscache_n_acquires_ok);
69976+ fscache_stat_unchecked(&fscache_n_acquires_ok);
69977 _leave(" = %p", cookie);
69978 return cookie;
69979 }
69980@@ -213,7 +213,7 @@ static int fscache_acquire_non_index_cookie(struct fscache_cookie *cookie)
69981 cache = fscache_select_cache_for_object(cookie->parent);
69982 if (!cache) {
69983 up_read(&fscache_addremove_sem);
69984- fscache_stat(&fscache_n_acquires_no_cache);
69985+ fscache_stat_unchecked(&fscache_n_acquires_no_cache);
69986 _leave(" = -ENOMEDIUM [no cache]");
69987 return -ENOMEDIUM;
69988 }
69989@@ -297,14 +297,14 @@ static int fscache_alloc_object(struct fscache_cache *cache,
69990 object = cache->ops->alloc_object(cache, cookie);
69991 fscache_stat_d(&fscache_n_cop_alloc_object);
69992 if (IS_ERR(object)) {
69993- fscache_stat(&fscache_n_object_no_alloc);
69994+ fscache_stat_unchecked(&fscache_n_object_no_alloc);
69995 ret = PTR_ERR(object);
69996 goto error;
69997 }
69998
69999- fscache_stat(&fscache_n_object_alloc);
70000+ fscache_stat_unchecked(&fscache_n_object_alloc);
70001
70002- object->debug_id = atomic_inc_return(&fscache_object_debug_id);
70003+ object->debug_id = atomic_inc_return_unchecked(&fscache_object_debug_id);
70004
70005 _debug("ALLOC OBJ%x: %s {%lx}",
70006 object->debug_id, cookie->def->name, object->events);
70007@@ -418,7 +418,7 @@ void __fscache_invalidate(struct fscache_cookie *cookie)
70008
70009 _enter("{%s}", cookie->def->name);
70010
70011- fscache_stat(&fscache_n_invalidates);
70012+ fscache_stat_unchecked(&fscache_n_invalidates);
70013
70014 /* Only permit invalidation of data files. Invalidating an index will
70015 * require the caller to release all its attachments to the tree rooted
70016@@ -476,10 +476,10 @@ void __fscache_update_cookie(struct fscache_cookie *cookie)
70017 {
70018 struct fscache_object *object;
70019
70020- fscache_stat(&fscache_n_updates);
70021+ fscache_stat_unchecked(&fscache_n_updates);
70022
70023 if (!cookie) {
70024- fscache_stat(&fscache_n_updates_null);
70025+ fscache_stat_unchecked(&fscache_n_updates_null);
70026 _leave(" [no cookie]");
70027 return;
70028 }
70029@@ -580,12 +580,12 @@ EXPORT_SYMBOL(__fscache_disable_cookie);
70030 */
70031 void __fscache_relinquish_cookie(struct fscache_cookie *cookie, bool retire)
70032 {
70033- fscache_stat(&fscache_n_relinquishes);
70034+ fscache_stat_unchecked(&fscache_n_relinquishes);
70035 if (retire)
70036- fscache_stat(&fscache_n_relinquishes_retire);
70037+ fscache_stat_unchecked(&fscache_n_relinquishes_retire);
70038
70039 if (!cookie) {
70040- fscache_stat(&fscache_n_relinquishes_null);
70041+ fscache_stat_unchecked(&fscache_n_relinquishes_null);
70042 _leave(" [no cookie]");
70043 return;
70044 }
70045@@ -686,7 +686,7 @@ int __fscache_check_consistency(struct fscache_cookie *cookie)
70046 if (test_bit(FSCACHE_IOERROR, &object->cache->flags))
70047 goto inconsistent;
70048
70049- op->debug_id = atomic_inc_return(&fscache_op_debug_id);
70050+ op->debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
70051
70052 __fscache_use_cookie(cookie);
70053 if (fscache_submit_op(object, op) < 0)
70054diff --git a/fs/fscache/internal.h b/fs/fscache/internal.h
70055index 7872a62..d91b19f 100644
70056--- a/fs/fscache/internal.h
70057+++ b/fs/fscache/internal.h
70058@@ -137,8 +137,8 @@ extern void fscache_operation_gc(struct work_struct *);
70059 extern int fscache_wait_for_deferred_lookup(struct fscache_cookie *);
70060 extern int fscache_wait_for_operation_activation(struct fscache_object *,
70061 struct fscache_operation *,
70062- atomic_t *,
70063- atomic_t *,
70064+ atomic_unchecked_t *,
70065+ atomic_unchecked_t *,
70066 void (*)(struct fscache_operation *));
70067 extern void fscache_invalidate_writes(struct fscache_cookie *);
70068
70069@@ -157,101 +157,101 @@ extern void fscache_proc_cleanup(void);
70070 * stats.c
70071 */
70072 #ifdef CONFIG_FSCACHE_STATS
70073-extern atomic_t fscache_n_ops_processed[FSCACHE_MAX_THREADS];
70074-extern atomic_t fscache_n_objs_processed[FSCACHE_MAX_THREADS];
70075+extern atomic_unchecked_t fscache_n_ops_processed[FSCACHE_MAX_THREADS];
70076+extern atomic_unchecked_t fscache_n_objs_processed[FSCACHE_MAX_THREADS];
70077
70078-extern atomic_t fscache_n_op_pend;
70079-extern atomic_t fscache_n_op_run;
70080-extern atomic_t fscache_n_op_enqueue;
70081-extern atomic_t fscache_n_op_deferred_release;
70082-extern atomic_t fscache_n_op_release;
70083-extern atomic_t fscache_n_op_gc;
70084-extern atomic_t fscache_n_op_cancelled;
70085-extern atomic_t fscache_n_op_rejected;
70086+extern atomic_unchecked_t fscache_n_op_pend;
70087+extern atomic_unchecked_t fscache_n_op_run;
70088+extern atomic_unchecked_t fscache_n_op_enqueue;
70089+extern atomic_unchecked_t fscache_n_op_deferred_release;
70090+extern atomic_unchecked_t fscache_n_op_release;
70091+extern atomic_unchecked_t fscache_n_op_gc;
70092+extern atomic_unchecked_t fscache_n_op_cancelled;
70093+extern atomic_unchecked_t fscache_n_op_rejected;
70094
70095-extern atomic_t fscache_n_attr_changed;
70096-extern atomic_t fscache_n_attr_changed_ok;
70097-extern atomic_t fscache_n_attr_changed_nobufs;
70098-extern atomic_t fscache_n_attr_changed_nomem;
70099-extern atomic_t fscache_n_attr_changed_calls;
70100+extern atomic_unchecked_t fscache_n_attr_changed;
70101+extern atomic_unchecked_t fscache_n_attr_changed_ok;
70102+extern atomic_unchecked_t fscache_n_attr_changed_nobufs;
70103+extern atomic_unchecked_t fscache_n_attr_changed_nomem;
70104+extern atomic_unchecked_t fscache_n_attr_changed_calls;
70105
70106-extern atomic_t fscache_n_allocs;
70107-extern atomic_t fscache_n_allocs_ok;
70108-extern atomic_t fscache_n_allocs_wait;
70109-extern atomic_t fscache_n_allocs_nobufs;
70110-extern atomic_t fscache_n_allocs_intr;
70111-extern atomic_t fscache_n_allocs_object_dead;
70112-extern atomic_t fscache_n_alloc_ops;
70113-extern atomic_t fscache_n_alloc_op_waits;
70114+extern atomic_unchecked_t fscache_n_allocs;
70115+extern atomic_unchecked_t fscache_n_allocs_ok;
70116+extern atomic_unchecked_t fscache_n_allocs_wait;
70117+extern atomic_unchecked_t fscache_n_allocs_nobufs;
70118+extern atomic_unchecked_t fscache_n_allocs_intr;
70119+extern atomic_unchecked_t fscache_n_allocs_object_dead;
70120+extern atomic_unchecked_t fscache_n_alloc_ops;
70121+extern atomic_unchecked_t fscache_n_alloc_op_waits;
70122
70123-extern atomic_t fscache_n_retrievals;
70124-extern atomic_t fscache_n_retrievals_ok;
70125-extern atomic_t fscache_n_retrievals_wait;
70126-extern atomic_t fscache_n_retrievals_nodata;
70127-extern atomic_t fscache_n_retrievals_nobufs;
70128-extern atomic_t fscache_n_retrievals_intr;
70129-extern atomic_t fscache_n_retrievals_nomem;
70130-extern atomic_t fscache_n_retrievals_object_dead;
70131-extern atomic_t fscache_n_retrieval_ops;
70132-extern atomic_t fscache_n_retrieval_op_waits;
70133+extern atomic_unchecked_t fscache_n_retrievals;
70134+extern atomic_unchecked_t fscache_n_retrievals_ok;
70135+extern atomic_unchecked_t fscache_n_retrievals_wait;
70136+extern atomic_unchecked_t fscache_n_retrievals_nodata;
70137+extern atomic_unchecked_t fscache_n_retrievals_nobufs;
70138+extern atomic_unchecked_t fscache_n_retrievals_intr;
70139+extern atomic_unchecked_t fscache_n_retrievals_nomem;
70140+extern atomic_unchecked_t fscache_n_retrievals_object_dead;
70141+extern atomic_unchecked_t fscache_n_retrieval_ops;
70142+extern atomic_unchecked_t fscache_n_retrieval_op_waits;
70143
70144-extern atomic_t fscache_n_stores;
70145-extern atomic_t fscache_n_stores_ok;
70146-extern atomic_t fscache_n_stores_again;
70147-extern atomic_t fscache_n_stores_nobufs;
70148-extern atomic_t fscache_n_stores_oom;
70149-extern atomic_t fscache_n_store_ops;
70150-extern atomic_t fscache_n_store_calls;
70151-extern atomic_t fscache_n_store_pages;
70152-extern atomic_t fscache_n_store_radix_deletes;
70153-extern atomic_t fscache_n_store_pages_over_limit;
70154+extern atomic_unchecked_t fscache_n_stores;
70155+extern atomic_unchecked_t fscache_n_stores_ok;
70156+extern atomic_unchecked_t fscache_n_stores_again;
70157+extern atomic_unchecked_t fscache_n_stores_nobufs;
70158+extern atomic_unchecked_t fscache_n_stores_oom;
70159+extern atomic_unchecked_t fscache_n_store_ops;
70160+extern atomic_unchecked_t fscache_n_store_calls;
70161+extern atomic_unchecked_t fscache_n_store_pages;
70162+extern atomic_unchecked_t fscache_n_store_radix_deletes;
70163+extern atomic_unchecked_t fscache_n_store_pages_over_limit;
70164
70165-extern atomic_t fscache_n_store_vmscan_not_storing;
70166-extern atomic_t fscache_n_store_vmscan_gone;
70167-extern atomic_t fscache_n_store_vmscan_busy;
70168-extern atomic_t fscache_n_store_vmscan_cancelled;
70169-extern atomic_t fscache_n_store_vmscan_wait;
70170+extern atomic_unchecked_t fscache_n_store_vmscan_not_storing;
70171+extern atomic_unchecked_t fscache_n_store_vmscan_gone;
70172+extern atomic_unchecked_t fscache_n_store_vmscan_busy;
70173+extern atomic_unchecked_t fscache_n_store_vmscan_cancelled;
70174+extern atomic_unchecked_t fscache_n_store_vmscan_wait;
70175
70176-extern atomic_t fscache_n_marks;
70177-extern atomic_t fscache_n_uncaches;
70178+extern atomic_unchecked_t fscache_n_marks;
70179+extern atomic_unchecked_t fscache_n_uncaches;
70180
70181-extern atomic_t fscache_n_acquires;
70182-extern atomic_t fscache_n_acquires_null;
70183-extern atomic_t fscache_n_acquires_no_cache;
70184-extern atomic_t fscache_n_acquires_ok;
70185-extern atomic_t fscache_n_acquires_nobufs;
70186-extern atomic_t fscache_n_acquires_oom;
70187+extern atomic_unchecked_t fscache_n_acquires;
70188+extern atomic_unchecked_t fscache_n_acquires_null;
70189+extern atomic_unchecked_t fscache_n_acquires_no_cache;
70190+extern atomic_unchecked_t fscache_n_acquires_ok;
70191+extern atomic_unchecked_t fscache_n_acquires_nobufs;
70192+extern atomic_unchecked_t fscache_n_acquires_oom;
70193
70194-extern atomic_t fscache_n_invalidates;
70195-extern atomic_t fscache_n_invalidates_run;
70196+extern atomic_unchecked_t fscache_n_invalidates;
70197+extern atomic_unchecked_t fscache_n_invalidates_run;
70198
70199-extern atomic_t fscache_n_updates;
70200-extern atomic_t fscache_n_updates_null;
70201-extern atomic_t fscache_n_updates_run;
70202+extern atomic_unchecked_t fscache_n_updates;
70203+extern atomic_unchecked_t fscache_n_updates_null;
70204+extern atomic_unchecked_t fscache_n_updates_run;
70205
70206-extern atomic_t fscache_n_relinquishes;
70207-extern atomic_t fscache_n_relinquishes_null;
70208-extern atomic_t fscache_n_relinquishes_waitcrt;
70209-extern atomic_t fscache_n_relinquishes_retire;
70210+extern atomic_unchecked_t fscache_n_relinquishes;
70211+extern atomic_unchecked_t fscache_n_relinquishes_null;
70212+extern atomic_unchecked_t fscache_n_relinquishes_waitcrt;
70213+extern atomic_unchecked_t fscache_n_relinquishes_retire;
70214
70215-extern atomic_t fscache_n_cookie_index;
70216-extern atomic_t fscache_n_cookie_data;
70217-extern atomic_t fscache_n_cookie_special;
70218+extern atomic_unchecked_t fscache_n_cookie_index;
70219+extern atomic_unchecked_t fscache_n_cookie_data;
70220+extern atomic_unchecked_t fscache_n_cookie_special;
70221
70222-extern atomic_t fscache_n_object_alloc;
70223-extern atomic_t fscache_n_object_no_alloc;
70224-extern atomic_t fscache_n_object_lookups;
70225-extern atomic_t fscache_n_object_lookups_negative;
70226-extern atomic_t fscache_n_object_lookups_positive;
70227-extern atomic_t fscache_n_object_lookups_timed_out;
70228-extern atomic_t fscache_n_object_created;
70229-extern atomic_t fscache_n_object_avail;
70230-extern atomic_t fscache_n_object_dead;
70231+extern atomic_unchecked_t fscache_n_object_alloc;
70232+extern atomic_unchecked_t fscache_n_object_no_alloc;
70233+extern atomic_unchecked_t fscache_n_object_lookups;
70234+extern atomic_unchecked_t fscache_n_object_lookups_negative;
70235+extern atomic_unchecked_t fscache_n_object_lookups_positive;
70236+extern atomic_unchecked_t fscache_n_object_lookups_timed_out;
70237+extern atomic_unchecked_t fscache_n_object_created;
70238+extern atomic_unchecked_t fscache_n_object_avail;
70239+extern atomic_unchecked_t fscache_n_object_dead;
70240
70241-extern atomic_t fscache_n_checkaux_none;
70242-extern atomic_t fscache_n_checkaux_okay;
70243-extern atomic_t fscache_n_checkaux_update;
70244-extern atomic_t fscache_n_checkaux_obsolete;
70245+extern atomic_unchecked_t fscache_n_checkaux_none;
70246+extern atomic_unchecked_t fscache_n_checkaux_okay;
70247+extern atomic_unchecked_t fscache_n_checkaux_update;
70248+extern atomic_unchecked_t fscache_n_checkaux_obsolete;
70249
70250 extern atomic_t fscache_n_cop_alloc_object;
70251 extern atomic_t fscache_n_cop_lookup_object;
70252@@ -276,6 +276,11 @@ static inline void fscache_stat(atomic_t *stat)
70253 atomic_inc(stat);
70254 }
70255
70256+static inline void fscache_stat_unchecked(atomic_unchecked_t *stat)
70257+{
70258+ atomic_inc_unchecked(stat);
70259+}
70260+
70261 static inline void fscache_stat_d(atomic_t *stat)
70262 {
70263 atomic_dec(stat);
70264@@ -288,6 +293,7 @@ extern const struct file_operations fscache_stats_fops;
70265
70266 #define __fscache_stat(stat) (NULL)
70267 #define fscache_stat(stat) do {} while (0)
70268+#define fscache_stat_unchecked(stat) do {} while (0)
70269 #define fscache_stat_d(stat) do {} while (0)
70270 #endif
70271
70272diff --git a/fs/fscache/object.c b/fs/fscache/object.c
70273index da032da..0076ce7 100644
70274--- a/fs/fscache/object.c
70275+++ b/fs/fscache/object.c
70276@@ -454,7 +454,7 @@ static const struct fscache_state *fscache_look_up_object(struct fscache_object
70277 _debug("LOOKUP \"%s\" in \"%s\"",
70278 cookie->def->name, object->cache->tag->name);
70279
70280- fscache_stat(&fscache_n_object_lookups);
70281+ fscache_stat_unchecked(&fscache_n_object_lookups);
70282 fscache_stat(&fscache_n_cop_lookup_object);
70283 ret = object->cache->ops->lookup_object(object);
70284 fscache_stat_d(&fscache_n_cop_lookup_object);
70285@@ -464,7 +464,7 @@ static const struct fscache_state *fscache_look_up_object(struct fscache_object
70286 if (ret == -ETIMEDOUT) {
70287 /* probably stuck behind another object, so move this one to
70288 * the back of the queue */
70289- fscache_stat(&fscache_n_object_lookups_timed_out);
70290+ fscache_stat_unchecked(&fscache_n_object_lookups_timed_out);
70291 _leave(" [timeout]");
70292 return NO_TRANSIT;
70293 }
70294@@ -492,7 +492,7 @@ void fscache_object_lookup_negative(struct fscache_object *object)
70295 _enter("{OBJ%x,%s}", object->debug_id, object->state->name);
70296
70297 if (!test_and_set_bit(FSCACHE_OBJECT_IS_LOOKED_UP, &object->flags)) {
70298- fscache_stat(&fscache_n_object_lookups_negative);
70299+ fscache_stat_unchecked(&fscache_n_object_lookups_negative);
70300
70301 /* Allow write requests to begin stacking up and read requests to begin
70302 * returning ENODATA.
70303@@ -527,7 +527,7 @@ void fscache_obtained_object(struct fscache_object *object)
70304 /* if we were still looking up, then we must have a positive lookup
70305 * result, in which case there may be data available */
70306 if (!test_and_set_bit(FSCACHE_OBJECT_IS_LOOKED_UP, &object->flags)) {
70307- fscache_stat(&fscache_n_object_lookups_positive);
70308+ fscache_stat_unchecked(&fscache_n_object_lookups_positive);
70309
70310 /* We do (presumably) have data */
70311 clear_bit_unlock(FSCACHE_COOKIE_NO_DATA_YET, &cookie->flags);
70312@@ -539,7 +539,7 @@ void fscache_obtained_object(struct fscache_object *object)
70313 clear_bit_unlock(FSCACHE_COOKIE_LOOKING_UP, &cookie->flags);
70314 wake_up_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP);
70315 } else {
70316- fscache_stat(&fscache_n_object_created);
70317+ fscache_stat_unchecked(&fscache_n_object_created);
70318 }
70319
70320 set_bit(FSCACHE_OBJECT_IS_AVAILABLE, &object->flags);
70321@@ -575,7 +575,7 @@ static const struct fscache_state *fscache_object_available(struct fscache_objec
70322 fscache_stat_d(&fscache_n_cop_lookup_complete);
70323
70324 fscache_hist(fscache_obj_instantiate_histogram, object->lookup_jif);
70325- fscache_stat(&fscache_n_object_avail);
70326+ fscache_stat_unchecked(&fscache_n_object_avail);
70327
70328 _leave("");
70329 return transit_to(JUMPSTART_DEPS);
70330@@ -722,7 +722,7 @@ static const struct fscache_state *fscache_drop_object(struct fscache_object *ob
70331
70332 /* this just shifts the object release to the work processor */
70333 fscache_put_object(object);
70334- fscache_stat(&fscache_n_object_dead);
70335+ fscache_stat_unchecked(&fscache_n_object_dead);
70336
70337 _leave("");
70338 return transit_to(OBJECT_DEAD);
70339@@ -887,7 +887,7 @@ enum fscache_checkaux fscache_check_aux(struct fscache_object *object,
70340 enum fscache_checkaux result;
70341
70342 if (!object->cookie->def->check_aux) {
70343- fscache_stat(&fscache_n_checkaux_none);
70344+ fscache_stat_unchecked(&fscache_n_checkaux_none);
70345 return FSCACHE_CHECKAUX_OKAY;
70346 }
70347
70348@@ -896,17 +896,17 @@ enum fscache_checkaux fscache_check_aux(struct fscache_object *object,
70349 switch (result) {
70350 /* entry okay as is */
70351 case FSCACHE_CHECKAUX_OKAY:
70352- fscache_stat(&fscache_n_checkaux_okay);
70353+ fscache_stat_unchecked(&fscache_n_checkaux_okay);
70354 break;
70355
70356 /* entry requires update */
70357 case FSCACHE_CHECKAUX_NEEDS_UPDATE:
70358- fscache_stat(&fscache_n_checkaux_update);
70359+ fscache_stat_unchecked(&fscache_n_checkaux_update);
70360 break;
70361
70362 /* entry requires deletion */
70363 case FSCACHE_CHECKAUX_OBSOLETE:
70364- fscache_stat(&fscache_n_checkaux_obsolete);
70365+ fscache_stat_unchecked(&fscache_n_checkaux_obsolete);
70366 break;
70367
70368 default:
70369@@ -993,7 +993,7 @@ static const struct fscache_state *fscache_invalidate_object(struct fscache_obje
70370 {
70371 const struct fscache_state *s;
70372
70373- fscache_stat(&fscache_n_invalidates_run);
70374+ fscache_stat_unchecked(&fscache_n_invalidates_run);
70375 fscache_stat(&fscache_n_cop_invalidate_object);
70376 s = _fscache_invalidate_object(object, event);
70377 fscache_stat_d(&fscache_n_cop_invalidate_object);
70378@@ -1008,7 +1008,7 @@ static const struct fscache_state *fscache_update_object(struct fscache_object *
70379 {
70380 _enter("{OBJ%x},%d", object->debug_id, event);
70381
70382- fscache_stat(&fscache_n_updates_run);
70383+ fscache_stat_unchecked(&fscache_n_updates_run);
70384 fscache_stat(&fscache_n_cop_update_object);
70385 object->cache->ops->update_object(object);
70386 fscache_stat_d(&fscache_n_cop_update_object);
70387diff --git a/fs/fscache/operation.c b/fs/fscache/operation.c
70388index e7b87a0..a85d47a 100644
70389--- a/fs/fscache/operation.c
70390+++ b/fs/fscache/operation.c
70391@@ -17,7 +17,7 @@
70392 #include <linux/slab.h>
70393 #include "internal.h"
70394
70395-atomic_t fscache_op_debug_id;
70396+atomic_unchecked_t fscache_op_debug_id;
70397 EXPORT_SYMBOL(fscache_op_debug_id);
70398
70399 /**
70400@@ -39,7 +39,7 @@ void fscache_enqueue_operation(struct fscache_operation *op)
70401 ASSERTCMP(atomic_read(&op->usage), >, 0);
70402 ASSERTCMP(op->state, ==, FSCACHE_OP_ST_IN_PROGRESS);
70403
70404- fscache_stat(&fscache_n_op_enqueue);
70405+ fscache_stat_unchecked(&fscache_n_op_enqueue);
70406 switch (op->flags & FSCACHE_OP_TYPE) {
70407 case FSCACHE_OP_ASYNC:
70408 _debug("queue async");
70409@@ -72,7 +72,7 @@ static void fscache_run_op(struct fscache_object *object,
70410 wake_up_bit(&op->flags, FSCACHE_OP_WAITING);
70411 if (op->processor)
70412 fscache_enqueue_operation(op);
70413- fscache_stat(&fscache_n_op_run);
70414+ fscache_stat_unchecked(&fscache_n_op_run);
70415 }
70416
70417 /*
70418@@ -104,11 +104,11 @@ int fscache_submit_exclusive_op(struct fscache_object *object,
70419 if (object->n_in_progress > 0) {
70420 atomic_inc(&op->usage);
70421 list_add_tail(&op->pend_link, &object->pending_ops);
70422- fscache_stat(&fscache_n_op_pend);
70423+ fscache_stat_unchecked(&fscache_n_op_pend);
70424 } else if (!list_empty(&object->pending_ops)) {
70425 atomic_inc(&op->usage);
70426 list_add_tail(&op->pend_link, &object->pending_ops);
70427- fscache_stat(&fscache_n_op_pend);
70428+ fscache_stat_unchecked(&fscache_n_op_pend);
70429 fscache_start_operations(object);
70430 } else {
70431 ASSERTCMP(object->n_in_progress, ==, 0);
70432@@ -124,7 +124,7 @@ int fscache_submit_exclusive_op(struct fscache_object *object,
70433 object->n_exclusive++; /* reads and writes must wait */
70434 atomic_inc(&op->usage);
70435 list_add_tail(&op->pend_link, &object->pending_ops);
70436- fscache_stat(&fscache_n_op_pend);
70437+ fscache_stat_unchecked(&fscache_n_op_pend);
70438 ret = 0;
70439 } else {
70440 /* If we're in any other state, there must have been an I/O
70441@@ -211,11 +211,11 @@ int fscache_submit_op(struct fscache_object *object,
70442 if (object->n_exclusive > 0) {
70443 atomic_inc(&op->usage);
70444 list_add_tail(&op->pend_link, &object->pending_ops);
70445- fscache_stat(&fscache_n_op_pend);
70446+ fscache_stat_unchecked(&fscache_n_op_pend);
70447 } else if (!list_empty(&object->pending_ops)) {
70448 atomic_inc(&op->usage);
70449 list_add_tail(&op->pend_link, &object->pending_ops);
70450- fscache_stat(&fscache_n_op_pend);
70451+ fscache_stat_unchecked(&fscache_n_op_pend);
70452 fscache_start_operations(object);
70453 } else {
70454 ASSERTCMP(object->n_exclusive, ==, 0);
70455@@ -227,10 +227,10 @@ int fscache_submit_op(struct fscache_object *object,
70456 object->n_ops++;
70457 atomic_inc(&op->usage);
70458 list_add_tail(&op->pend_link, &object->pending_ops);
70459- fscache_stat(&fscache_n_op_pend);
70460+ fscache_stat_unchecked(&fscache_n_op_pend);
70461 ret = 0;
70462 } else if (fscache_object_is_dying(object)) {
70463- fscache_stat(&fscache_n_op_rejected);
70464+ fscache_stat_unchecked(&fscache_n_op_rejected);
70465 op->state = FSCACHE_OP_ST_CANCELLED;
70466 ret = -ENOBUFS;
70467 } else if (!test_bit(FSCACHE_IOERROR, &object->cache->flags)) {
70468@@ -309,7 +309,7 @@ int fscache_cancel_op(struct fscache_operation *op,
70469 ret = -EBUSY;
70470 if (op->state == FSCACHE_OP_ST_PENDING) {
70471 ASSERT(!list_empty(&op->pend_link));
70472- fscache_stat(&fscache_n_op_cancelled);
70473+ fscache_stat_unchecked(&fscache_n_op_cancelled);
70474 list_del_init(&op->pend_link);
70475 if (do_cancel)
70476 do_cancel(op);
70477@@ -341,7 +341,7 @@ void fscache_cancel_all_ops(struct fscache_object *object)
70478 while (!list_empty(&object->pending_ops)) {
70479 op = list_entry(object->pending_ops.next,
70480 struct fscache_operation, pend_link);
70481- fscache_stat(&fscache_n_op_cancelled);
70482+ fscache_stat_unchecked(&fscache_n_op_cancelled);
70483 list_del_init(&op->pend_link);
70484
70485 ASSERTCMP(op->state, ==, FSCACHE_OP_ST_PENDING);
70486@@ -413,7 +413,7 @@ void fscache_put_operation(struct fscache_operation *op)
70487 op->state, ==, FSCACHE_OP_ST_CANCELLED);
70488 op->state = FSCACHE_OP_ST_DEAD;
70489
70490- fscache_stat(&fscache_n_op_release);
70491+ fscache_stat_unchecked(&fscache_n_op_release);
70492
70493 if (op->release) {
70494 op->release(op);
70495@@ -432,7 +432,7 @@ void fscache_put_operation(struct fscache_operation *op)
70496 * lock, and defer it otherwise */
70497 if (!spin_trylock(&object->lock)) {
70498 _debug("defer put");
70499- fscache_stat(&fscache_n_op_deferred_release);
70500+ fscache_stat_unchecked(&fscache_n_op_deferred_release);
70501
70502 cache = object->cache;
70503 spin_lock(&cache->op_gc_list_lock);
70504@@ -485,7 +485,7 @@ void fscache_operation_gc(struct work_struct *work)
70505
70506 _debug("GC DEFERRED REL OBJ%x OP%x",
70507 object->debug_id, op->debug_id);
70508- fscache_stat(&fscache_n_op_gc);
70509+ fscache_stat_unchecked(&fscache_n_op_gc);
70510
70511 ASSERTCMP(atomic_read(&op->usage), ==, 0);
70512 ASSERTCMP(op->state, ==, FSCACHE_OP_ST_DEAD);
70513diff --git a/fs/fscache/page.c b/fs/fscache/page.c
70514index de33b3f..8be4d29 100644
70515--- a/fs/fscache/page.c
70516+++ b/fs/fscache/page.c
70517@@ -74,7 +74,7 @@ try_again:
70518 val = radix_tree_lookup(&cookie->stores, page->index);
70519 if (!val) {
70520 rcu_read_unlock();
70521- fscache_stat(&fscache_n_store_vmscan_not_storing);
70522+ fscache_stat_unchecked(&fscache_n_store_vmscan_not_storing);
70523 __fscache_uncache_page(cookie, page);
70524 return true;
70525 }
70526@@ -104,11 +104,11 @@ try_again:
70527 spin_unlock(&cookie->stores_lock);
70528
70529 if (xpage) {
70530- fscache_stat(&fscache_n_store_vmscan_cancelled);
70531- fscache_stat(&fscache_n_store_radix_deletes);
70532+ fscache_stat_unchecked(&fscache_n_store_vmscan_cancelled);
70533+ fscache_stat_unchecked(&fscache_n_store_radix_deletes);
70534 ASSERTCMP(xpage, ==, page);
70535 } else {
70536- fscache_stat(&fscache_n_store_vmscan_gone);
70537+ fscache_stat_unchecked(&fscache_n_store_vmscan_gone);
70538 }
70539
70540 wake_up_bit(&cookie->flags, 0);
70541@@ -123,11 +123,11 @@ page_busy:
70542 * sleeping on memory allocation, so we may need to impose a timeout
70543 * too. */
70544 if (!(gfp & __GFP_WAIT) || !(gfp & __GFP_FS)) {
70545- fscache_stat(&fscache_n_store_vmscan_busy);
70546+ fscache_stat_unchecked(&fscache_n_store_vmscan_busy);
70547 return false;
70548 }
70549
70550- fscache_stat(&fscache_n_store_vmscan_wait);
70551+ fscache_stat_unchecked(&fscache_n_store_vmscan_wait);
70552 if (!release_page_wait_timeout(cookie, page))
70553 _debug("fscache writeout timeout page: %p{%lx}",
70554 page, page->index);
70555@@ -156,7 +156,7 @@ static void fscache_end_page_write(struct fscache_object *object,
70556 FSCACHE_COOKIE_STORING_TAG);
70557 if (!radix_tree_tag_get(&cookie->stores, page->index,
70558 FSCACHE_COOKIE_PENDING_TAG)) {
70559- fscache_stat(&fscache_n_store_radix_deletes);
70560+ fscache_stat_unchecked(&fscache_n_store_radix_deletes);
70561 xpage = radix_tree_delete(&cookie->stores, page->index);
70562 }
70563 spin_unlock(&cookie->stores_lock);
70564@@ -177,7 +177,7 @@ static void fscache_attr_changed_op(struct fscache_operation *op)
70565
70566 _enter("{OBJ%x OP%x}", object->debug_id, op->debug_id);
70567
70568- fscache_stat(&fscache_n_attr_changed_calls);
70569+ fscache_stat_unchecked(&fscache_n_attr_changed_calls);
70570
70571 if (fscache_object_is_active(object)) {
70572 fscache_stat(&fscache_n_cop_attr_changed);
70573@@ -204,11 +204,11 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
70574
70575 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
70576
70577- fscache_stat(&fscache_n_attr_changed);
70578+ fscache_stat_unchecked(&fscache_n_attr_changed);
70579
70580 op = kzalloc(sizeof(*op), GFP_KERNEL);
70581 if (!op) {
70582- fscache_stat(&fscache_n_attr_changed_nomem);
70583+ fscache_stat_unchecked(&fscache_n_attr_changed_nomem);
70584 _leave(" = -ENOMEM");
70585 return -ENOMEM;
70586 }
70587@@ -230,7 +230,7 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
70588 if (fscache_submit_exclusive_op(object, op) < 0)
70589 goto nobufs_dec;
70590 spin_unlock(&cookie->lock);
70591- fscache_stat(&fscache_n_attr_changed_ok);
70592+ fscache_stat_unchecked(&fscache_n_attr_changed_ok);
70593 fscache_put_operation(op);
70594 _leave(" = 0");
70595 return 0;
70596@@ -242,7 +242,7 @@ nobufs:
70597 kfree(op);
70598 if (wake_cookie)
70599 __fscache_wake_unused_cookie(cookie);
70600- fscache_stat(&fscache_n_attr_changed_nobufs);
70601+ fscache_stat_unchecked(&fscache_n_attr_changed_nobufs);
70602 _leave(" = %d", -ENOBUFS);
70603 return -ENOBUFS;
70604 }
70605@@ -281,7 +281,7 @@ static struct fscache_retrieval *fscache_alloc_retrieval(
70606 /* allocate a retrieval operation and attempt to submit it */
70607 op = kzalloc(sizeof(*op), GFP_NOIO);
70608 if (!op) {
70609- fscache_stat(&fscache_n_retrievals_nomem);
70610+ fscache_stat_unchecked(&fscache_n_retrievals_nomem);
70611 return NULL;
70612 }
70613
70614@@ -311,12 +311,12 @@ int fscache_wait_for_deferred_lookup(struct fscache_cookie *cookie)
70615 return 0;
70616 }
70617
70618- fscache_stat(&fscache_n_retrievals_wait);
70619+ fscache_stat_unchecked(&fscache_n_retrievals_wait);
70620
70621 jif = jiffies;
70622 if (wait_on_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP,
70623 TASK_INTERRUPTIBLE) != 0) {
70624- fscache_stat(&fscache_n_retrievals_intr);
70625+ fscache_stat_unchecked(&fscache_n_retrievals_intr);
70626 _leave(" = -ERESTARTSYS");
70627 return -ERESTARTSYS;
70628 }
70629@@ -345,8 +345,8 @@ static void fscache_do_cancel_retrieval(struct fscache_operation *_op)
70630 */
70631 int fscache_wait_for_operation_activation(struct fscache_object *object,
70632 struct fscache_operation *op,
70633- atomic_t *stat_op_waits,
70634- atomic_t *stat_object_dead,
70635+ atomic_unchecked_t *stat_op_waits,
70636+ atomic_unchecked_t *stat_object_dead,
70637 void (*do_cancel)(struct fscache_operation *))
70638 {
70639 int ret;
70640@@ -356,7 +356,7 @@ int fscache_wait_for_operation_activation(struct fscache_object *object,
70641
70642 _debug(">>> WT");
70643 if (stat_op_waits)
70644- fscache_stat(stat_op_waits);
70645+ fscache_stat_unchecked(stat_op_waits);
70646 if (wait_on_bit(&op->flags, FSCACHE_OP_WAITING,
70647 TASK_INTERRUPTIBLE) != 0) {
70648 ret = fscache_cancel_op(op, do_cancel);
70649@@ -373,7 +373,7 @@ int fscache_wait_for_operation_activation(struct fscache_object *object,
70650 check_if_dead:
70651 if (op->state == FSCACHE_OP_ST_CANCELLED) {
70652 if (stat_object_dead)
70653- fscache_stat(stat_object_dead);
70654+ fscache_stat_unchecked(stat_object_dead);
70655 _leave(" = -ENOBUFS [cancelled]");
70656 return -ENOBUFS;
70657 }
70658@@ -381,7 +381,7 @@ check_if_dead:
70659 pr_err("%s() = -ENOBUFS [obj dead %d]\n", __func__, op->state);
70660 fscache_cancel_op(op, do_cancel);
70661 if (stat_object_dead)
70662- fscache_stat(stat_object_dead);
70663+ fscache_stat_unchecked(stat_object_dead);
70664 return -ENOBUFS;
70665 }
70666 return 0;
70667@@ -409,7 +409,7 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
70668
70669 _enter("%p,%p,,,", cookie, page);
70670
70671- fscache_stat(&fscache_n_retrievals);
70672+ fscache_stat_unchecked(&fscache_n_retrievals);
70673
70674 if (hlist_empty(&cookie->backing_objects))
70675 goto nobufs;
70676@@ -451,7 +451,7 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
70677 goto nobufs_unlock_dec;
70678 spin_unlock(&cookie->lock);
70679
70680- fscache_stat(&fscache_n_retrieval_ops);
70681+ fscache_stat_unchecked(&fscache_n_retrieval_ops);
70682
70683 /* pin the netfs read context in case we need to do the actual netfs
70684 * read because we've encountered a cache read failure */
70685@@ -482,15 +482,15 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
70686
70687 error:
70688 if (ret == -ENOMEM)
70689- fscache_stat(&fscache_n_retrievals_nomem);
70690+ fscache_stat_unchecked(&fscache_n_retrievals_nomem);
70691 else if (ret == -ERESTARTSYS)
70692- fscache_stat(&fscache_n_retrievals_intr);
70693+ fscache_stat_unchecked(&fscache_n_retrievals_intr);
70694 else if (ret == -ENODATA)
70695- fscache_stat(&fscache_n_retrievals_nodata);
70696+ fscache_stat_unchecked(&fscache_n_retrievals_nodata);
70697 else if (ret < 0)
70698- fscache_stat(&fscache_n_retrievals_nobufs);
70699+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
70700 else
70701- fscache_stat(&fscache_n_retrievals_ok);
70702+ fscache_stat_unchecked(&fscache_n_retrievals_ok);
70703
70704 fscache_put_retrieval(op);
70705 _leave(" = %d", ret);
70706@@ -505,7 +505,7 @@ nobufs_unlock:
70707 __fscache_wake_unused_cookie(cookie);
70708 kfree(op);
70709 nobufs:
70710- fscache_stat(&fscache_n_retrievals_nobufs);
70711+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
70712 _leave(" = -ENOBUFS");
70713 return -ENOBUFS;
70714 }
70715@@ -544,7 +544,7 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
70716
70717 _enter("%p,,%d,,,", cookie, *nr_pages);
70718
70719- fscache_stat(&fscache_n_retrievals);
70720+ fscache_stat_unchecked(&fscache_n_retrievals);
70721
70722 if (hlist_empty(&cookie->backing_objects))
70723 goto nobufs;
70724@@ -582,7 +582,7 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
70725 goto nobufs_unlock_dec;
70726 spin_unlock(&cookie->lock);
70727
70728- fscache_stat(&fscache_n_retrieval_ops);
70729+ fscache_stat_unchecked(&fscache_n_retrieval_ops);
70730
70731 /* pin the netfs read context in case we need to do the actual netfs
70732 * read because we've encountered a cache read failure */
70733@@ -613,15 +613,15 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
70734
70735 error:
70736 if (ret == -ENOMEM)
70737- fscache_stat(&fscache_n_retrievals_nomem);
70738+ fscache_stat_unchecked(&fscache_n_retrievals_nomem);
70739 else if (ret == -ERESTARTSYS)
70740- fscache_stat(&fscache_n_retrievals_intr);
70741+ fscache_stat_unchecked(&fscache_n_retrievals_intr);
70742 else if (ret == -ENODATA)
70743- fscache_stat(&fscache_n_retrievals_nodata);
70744+ fscache_stat_unchecked(&fscache_n_retrievals_nodata);
70745 else if (ret < 0)
70746- fscache_stat(&fscache_n_retrievals_nobufs);
70747+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
70748 else
70749- fscache_stat(&fscache_n_retrievals_ok);
70750+ fscache_stat_unchecked(&fscache_n_retrievals_ok);
70751
70752 fscache_put_retrieval(op);
70753 _leave(" = %d", ret);
70754@@ -636,7 +636,7 @@ nobufs_unlock:
70755 if (wake_cookie)
70756 __fscache_wake_unused_cookie(cookie);
70757 nobufs:
70758- fscache_stat(&fscache_n_retrievals_nobufs);
70759+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
70760 _leave(" = -ENOBUFS");
70761 return -ENOBUFS;
70762 }
70763@@ -661,7 +661,7 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
70764
70765 _enter("%p,%p,,,", cookie, page);
70766
70767- fscache_stat(&fscache_n_allocs);
70768+ fscache_stat_unchecked(&fscache_n_allocs);
70769
70770 if (hlist_empty(&cookie->backing_objects))
70771 goto nobufs;
70772@@ -695,7 +695,7 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
70773 goto nobufs_unlock_dec;
70774 spin_unlock(&cookie->lock);
70775
70776- fscache_stat(&fscache_n_alloc_ops);
70777+ fscache_stat_unchecked(&fscache_n_alloc_ops);
70778
70779 ret = fscache_wait_for_operation_activation(
70780 object, &op->op,
70781@@ -712,11 +712,11 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
70782
70783 error:
70784 if (ret == -ERESTARTSYS)
70785- fscache_stat(&fscache_n_allocs_intr);
70786+ fscache_stat_unchecked(&fscache_n_allocs_intr);
70787 else if (ret < 0)
70788- fscache_stat(&fscache_n_allocs_nobufs);
70789+ fscache_stat_unchecked(&fscache_n_allocs_nobufs);
70790 else
70791- fscache_stat(&fscache_n_allocs_ok);
70792+ fscache_stat_unchecked(&fscache_n_allocs_ok);
70793
70794 fscache_put_retrieval(op);
70795 _leave(" = %d", ret);
70796@@ -730,7 +730,7 @@ nobufs_unlock:
70797 if (wake_cookie)
70798 __fscache_wake_unused_cookie(cookie);
70799 nobufs:
70800- fscache_stat(&fscache_n_allocs_nobufs);
70801+ fscache_stat_unchecked(&fscache_n_allocs_nobufs);
70802 _leave(" = -ENOBUFS");
70803 return -ENOBUFS;
70804 }
70805@@ -806,7 +806,7 @@ static void fscache_write_op(struct fscache_operation *_op)
70806
70807 spin_lock(&cookie->stores_lock);
70808
70809- fscache_stat(&fscache_n_store_calls);
70810+ fscache_stat_unchecked(&fscache_n_store_calls);
70811
70812 /* find a page to store */
70813 page = NULL;
70814@@ -817,7 +817,7 @@ static void fscache_write_op(struct fscache_operation *_op)
70815 page = results[0];
70816 _debug("gang %d [%lx]", n, page->index);
70817 if (page->index > op->store_limit) {
70818- fscache_stat(&fscache_n_store_pages_over_limit);
70819+ fscache_stat_unchecked(&fscache_n_store_pages_over_limit);
70820 goto superseded;
70821 }
70822
70823@@ -829,7 +829,7 @@ static void fscache_write_op(struct fscache_operation *_op)
70824 spin_unlock(&cookie->stores_lock);
70825 spin_unlock(&object->lock);
70826
70827- fscache_stat(&fscache_n_store_pages);
70828+ fscache_stat_unchecked(&fscache_n_store_pages);
70829 fscache_stat(&fscache_n_cop_write_page);
70830 ret = object->cache->ops->write_page(op, page);
70831 fscache_stat_d(&fscache_n_cop_write_page);
70832@@ -933,7 +933,7 @@ int __fscache_write_page(struct fscache_cookie *cookie,
70833 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
70834 ASSERT(PageFsCache(page));
70835
70836- fscache_stat(&fscache_n_stores);
70837+ fscache_stat_unchecked(&fscache_n_stores);
70838
70839 if (test_bit(FSCACHE_COOKIE_INVALIDATING, &cookie->flags)) {
70840 _leave(" = -ENOBUFS [invalidating]");
70841@@ -992,7 +992,7 @@ int __fscache_write_page(struct fscache_cookie *cookie,
70842 spin_unlock(&cookie->stores_lock);
70843 spin_unlock(&object->lock);
70844
70845- op->op.debug_id = atomic_inc_return(&fscache_op_debug_id);
70846+ op->op.debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
70847 op->store_limit = object->store_limit;
70848
70849 __fscache_use_cookie(cookie);
70850@@ -1001,8 +1001,8 @@ int __fscache_write_page(struct fscache_cookie *cookie,
70851
70852 spin_unlock(&cookie->lock);
70853 radix_tree_preload_end();
70854- fscache_stat(&fscache_n_store_ops);
70855- fscache_stat(&fscache_n_stores_ok);
70856+ fscache_stat_unchecked(&fscache_n_store_ops);
70857+ fscache_stat_unchecked(&fscache_n_stores_ok);
70858
70859 /* the work queue now carries its own ref on the object */
70860 fscache_put_operation(&op->op);
70861@@ -1010,14 +1010,14 @@ int __fscache_write_page(struct fscache_cookie *cookie,
70862 return 0;
70863
70864 already_queued:
70865- fscache_stat(&fscache_n_stores_again);
70866+ fscache_stat_unchecked(&fscache_n_stores_again);
70867 already_pending:
70868 spin_unlock(&cookie->stores_lock);
70869 spin_unlock(&object->lock);
70870 spin_unlock(&cookie->lock);
70871 radix_tree_preload_end();
70872 kfree(op);
70873- fscache_stat(&fscache_n_stores_ok);
70874+ fscache_stat_unchecked(&fscache_n_stores_ok);
70875 _leave(" = 0");
70876 return 0;
70877
70878@@ -1039,14 +1039,14 @@ nobufs:
70879 kfree(op);
70880 if (wake_cookie)
70881 __fscache_wake_unused_cookie(cookie);
70882- fscache_stat(&fscache_n_stores_nobufs);
70883+ fscache_stat_unchecked(&fscache_n_stores_nobufs);
70884 _leave(" = -ENOBUFS");
70885 return -ENOBUFS;
70886
70887 nomem_free:
70888 kfree(op);
70889 nomem:
70890- fscache_stat(&fscache_n_stores_oom);
70891+ fscache_stat_unchecked(&fscache_n_stores_oom);
70892 _leave(" = -ENOMEM");
70893 return -ENOMEM;
70894 }
70895@@ -1064,7 +1064,7 @@ void __fscache_uncache_page(struct fscache_cookie *cookie, struct page *page)
70896 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
70897 ASSERTCMP(page, !=, NULL);
70898
70899- fscache_stat(&fscache_n_uncaches);
70900+ fscache_stat_unchecked(&fscache_n_uncaches);
70901
70902 /* cache withdrawal may beat us to it */
70903 if (!PageFsCache(page))
70904@@ -1115,7 +1115,7 @@ void fscache_mark_page_cached(struct fscache_retrieval *op, struct page *page)
70905 struct fscache_cookie *cookie = op->op.object->cookie;
70906
70907 #ifdef CONFIG_FSCACHE_STATS
70908- atomic_inc(&fscache_n_marks);
70909+ atomic_inc_unchecked(&fscache_n_marks);
70910 #endif
70911
70912 _debug("- mark %p{%lx}", page, page->index);
70913diff --git a/fs/fscache/stats.c b/fs/fscache/stats.c
70914index 40d13c7..ddf52b9 100644
70915--- a/fs/fscache/stats.c
70916+++ b/fs/fscache/stats.c
70917@@ -18,99 +18,99 @@
70918 /*
70919 * operation counters
70920 */
70921-atomic_t fscache_n_op_pend;
70922-atomic_t fscache_n_op_run;
70923-atomic_t fscache_n_op_enqueue;
70924-atomic_t fscache_n_op_requeue;
70925-atomic_t fscache_n_op_deferred_release;
70926-atomic_t fscache_n_op_release;
70927-atomic_t fscache_n_op_gc;
70928-atomic_t fscache_n_op_cancelled;
70929-atomic_t fscache_n_op_rejected;
70930+atomic_unchecked_t fscache_n_op_pend;
70931+atomic_unchecked_t fscache_n_op_run;
70932+atomic_unchecked_t fscache_n_op_enqueue;
70933+atomic_unchecked_t fscache_n_op_requeue;
70934+atomic_unchecked_t fscache_n_op_deferred_release;
70935+atomic_unchecked_t fscache_n_op_release;
70936+atomic_unchecked_t fscache_n_op_gc;
70937+atomic_unchecked_t fscache_n_op_cancelled;
70938+atomic_unchecked_t fscache_n_op_rejected;
70939
70940-atomic_t fscache_n_attr_changed;
70941-atomic_t fscache_n_attr_changed_ok;
70942-atomic_t fscache_n_attr_changed_nobufs;
70943-atomic_t fscache_n_attr_changed_nomem;
70944-atomic_t fscache_n_attr_changed_calls;
70945+atomic_unchecked_t fscache_n_attr_changed;
70946+atomic_unchecked_t fscache_n_attr_changed_ok;
70947+atomic_unchecked_t fscache_n_attr_changed_nobufs;
70948+atomic_unchecked_t fscache_n_attr_changed_nomem;
70949+atomic_unchecked_t fscache_n_attr_changed_calls;
70950
70951-atomic_t fscache_n_allocs;
70952-atomic_t fscache_n_allocs_ok;
70953-atomic_t fscache_n_allocs_wait;
70954-atomic_t fscache_n_allocs_nobufs;
70955-atomic_t fscache_n_allocs_intr;
70956-atomic_t fscache_n_allocs_object_dead;
70957-atomic_t fscache_n_alloc_ops;
70958-atomic_t fscache_n_alloc_op_waits;
70959+atomic_unchecked_t fscache_n_allocs;
70960+atomic_unchecked_t fscache_n_allocs_ok;
70961+atomic_unchecked_t fscache_n_allocs_wait;
70962+atomic_unchecked_t fscache_n_allocs_nobufs;
70963+atomic_unchecked_t fscache_n_allocs_intr;
70964+atomic_unchecked_t fscache_n_allocs_object_dead;
70965+atomic_unchecked_t fscache_n_alloc_ops;
70966+atomic_unchecked_t fscache_n_alloc_op_waits;
70967
70968-atomic_t fscache_n_retrievals;
70969-atomic_t fscache_n_retrievals_ok;
70970-atomic_t fscache_n_retrievals_wait;
70971-atomic_t fscache_n_retrievals_nodata;
70972-atomic_t fscache_n_retrievals_nobufs;
70973-atomic_t fscache_n_retrievals_intr;
70974-atomic_t fscache_n_retrievals_nomem;
70975-atomic_t fscache_n_retrievals_object_dead;
70976-atomic_t fscache_n_retrieval_ops;
70977-atomic_t fscache_n_retrieval_op_waits;
70978+atomic_unchecked_t fscache_n_retrievals;
70979+atomic_unchecked_t fscache_n_retrievals_ok;
70980+atomic_unchecked_t fscache_n_retrievals_wait;
70981+atomic_unchecked_t fscache_n_retrievals_nodata;
70982+atomic_unchecked_t fscache_n_retrievals_nobufs;
70983+atomic_unchecked_t fscache_n_retrievals_intr;
70984+atomic_unchecked_t fscache_n_retrievals_nomem;
70985+atomic_unchecked_t fscache_n_retrievals_object_dead;
70986+atomic_unchecked_t fscache_n_retrieval_ops;
70987+atomic_unchecked_t fscache_n_retrieval_op_waits;
70988
70989-atomic_t fscache_n_stores;
70990-atomic_t fscache_n_stores_ok;
70991-atomic_t fscache_n_stores_again;
70992-atomic_t fscache_n_stores_nobufs;
70993-atomic_t fscache_n_stores_oom;
70994-atomic_t fscache_n_store_ops;
70995-atomic_t fscache_n_store_calls;
70996-atomic_t fscache_n_store_pages;
70997-atomic_t fscache_n_store_radix_deletes;
70998-atomic_t fscache_n_store_pages_over_limit;
70999+atomic_unchecked_t fscache_n_stores;
71000+atomic_unchecked_t fscache_n_stores_ok;
71001+atomic_unchecked_t fscache_n_stores_again;
71002+atomic_unchecked_t fscache_n_stores_nobufs;
71003+atomic_unchecked_t fscache_n_stores_oom;
71004+atomic_unchecked_t fscache_n_store_ops;
71005+atomic_unchecked_t fscache_n_store_calls;
71006+atomic_unchecked_t fscache_n_store_pages;
71007+atomic_unchecked_t fscache_n_store_radix_deletes;
71008+atomic_unchecked_t fscache_n_store_pages_over_limit;
71009
71010-atomic_t fscache_n_store_vmscan_not_storing;
71011-atomic_t fscache_n_store_vmscan_gone;
71012-atomic_t fscache_n_store_vmscan_busy;
71013-atomic_t fscache_n_store_vmscan_cancelled;
71014-atomic_t fscache_n_store_vmscan_wait;
71015+atomic_unchecked_t fscache_n_store_vmscan_not_storing;
71016+atomic_unchecked_t fscache_n_store_vmscan_gone;
71017+atomic_unchecked_t fscache_n_store_vmscan_busy;
71018+atomic_unchecked_t fscache_n_store_vmscan_cancelled;
71019+atomic_unchecked_t fscache_n_store_vmscan_wait;
71020
71021-atomic_t fscache_n_marks;
71022-atomic_t fscache_n_uncaches;
71023+atomic_unchecked_t fscache_n_marks;
71024+atomic_unchecked_t fscache_n_uncaches;
71025
71026-atomic_t fscache_n_acquires;
71027-atomic_t fscache_n_acquires_null;
71028-atomic_t fscache_n_acquires_no_cache;
71029-atomic_t fscache_n_acquires_ok;
71030-atomic_t fscache_n_acquires_nobufs;
71031-atomic_t fscache_n_acquires_oom;
71032+atomic_unchecked_t fscache_n_acquires;
71033+atomic_unchecked_t fscache_n_acquires_null;
71034+atomic_unchecked_t fscache_n_acquires_no_cache;
71035+atomic_unchecked_t fscache_n_acquires_ok;
71036+atomic_unchecked_t fscache_n_acquires_nobufs;
71037+atomic_unchecked_t fscache_n_acquires_oom;
71038
71039-atomic_t fscache_n_invalidates;
71040-atomic_t fscache_n_invalidates_run;
71041+atomic_unchecked_t fscache_n_invalidates;
71042+atomic_unchecked_t fscache_n_invalidates_run;
71043
71044-atomic_t fscache_n_updates;
71045-atomic_t fscache_n_updates_null;
71046-atomic_t fscache_n_updates_run;
71047+atomic_unchecked_t fscache_n_updates;
71048+atomic_unchecked_t fscache_n_updates_null;
71049+atomic_unchecked_t fscache_n_updates_run;
71050
71051-atomic_t fscache_n_relinquishes;
71052-atomic_t fscache_n_relinquishes_null;
71053-atomic_t fscache_n_relinquishes_waitcrt;
71054-atomic_t fscache_n_relinquishes_retire;
71055+atomic_unchecked_t fscache_n_relinquishes;
71056+atomic_unchecked_t fscache_n_relinquishes_null;
71057+atomic_unchecked_t fscache_n_relinquishes_waitcrt;
71058+atomic_unchecked_t fscache_n_relinquishes_retire;
71059
71060-atomic_t fscache_n_cookie_index;
71061-atomic_t fscache_n_cookie_data;
71062-atomic_t fscache_n_cookie_special;
71063+atomic_unchecked_t fscache_n_cookie_index;
71064+atomic_unchecked_t fscache_n_cookie_data;
71065+atomic_unchecked_t fscache_n_cookie_special;
71066
71067-atomic_t fscache_n_object_alloc;
71068-atomic_t fscache_n_object_no_alloc;
71069-atomic_t fscache_n_object_lookups;
71070-atomic_t fscache_n_object_lookups_negative;
71071-atomic_t fscache_n_object_lookups_positive;
71072-atomic_t fscache_n_object_lookups_timed_out;
71073-atomic_t fscache_n_object_created;
71074-atomic_t fscache_n_object_avail;
71075-atomic_t fscache_n_object_dead;
71076+atomic_unchecked_t fscache_n_object_alloc;
71077+atomic_unchecked_t fscache_n_object_no_alloc;
71078+atomic_unchecked_t fscache_n_object_lookups;
71079+atomic_unchecked_t fscache_n_object_lookups_negative;
71080+atomic_unchecked_t fscache_n_object_lookups_positive;
71081+atomic_unchecked_t fscache_n_object_lookups_timed_out;
71082+atomic_unchecked_t fscache_n_object_created;
71083+atomic_unchecked_t fscache_n_object_avail;
71084+atomic_unchecked_t fscache_n_object_dead;
71085
71086-atomic_t fscache_n_checkaux_none;
71087-atomic_t fscache_n_checkaux_okay;
71088-atomic_t fscache_n_checkaux_update;
71089-atomic_t fscache_n_checkaux_obsolete;
71090+atomic_unchecked_t fscache_n_checkaux_none;
71091+atomic_unchecked_t fscache_n_checkaux_okay;
71092+atomic_unchecked_t fscache_n_checkaux_update;
71093+atomic_unchecked_t fscache_n_checkaux_obsolete;
71094
71095 atomic_t fscache_n_cop_alloc_object;
71096 atomic_t fscache_n_cop_lookup_object;
71097@@ -138,118 +138,118 @@ static int fscache_stats_show(struct seq_file *m, void *v)
71098 seq_puts(m, "FS-Cache statistics\n");
71099
71100 seq_printf(m, "Cookies: idx=%u dat=%u spc=%u\n",
71101- atomic_read(&fscache_n_cookie_index),
71102- atomic_read(&fscache_n_cookie_data),
71103- atomic_read(&fscache_n_cookie_special));
71104+ atomic_read_unchecked(&fscache_n_cookie_index),
71105+ atomic_read_unchecked(&fscache_n_cookie_data),
71106+ atomic_read_unchecked(&fscache_n_cookie_special));
71107
71108 seq_printf(m, "Objects: alc=%u nal=%u avl=%u ded=%u\n",
71109- atomic_read(&fscache_n_object_alloc),
71110- atomic_read(&fscache_n_object_no_alloc),
71111- atomic_read(&fscache_n_object_avail),
71112- atomic_read(&fscache_n_object_dead));
71113+ atomic_read_unchecked(&fscache_n_object_alloc),
71114+ atomic_read_unchecked(&fscache_n_object_no_alloc),
71115+ atomic_read_unchecked(&fscache_n_object_avail),
71116+ atomic_read_unchecked(&fscache_n_object_dead));
71117 seq_printf(m, "ChkAux : non=%u ok=%u upd=%u obs=%u\n",
71118- atomic_read(&fscache_n_checkaux_none),
71119- atomic_read(&fscache_n_checkaux_okay),
71120- atomic_read(&fscache_n_checkaux_update),
71121- atomic_read(&fscache_n_checkaux_obsolete));
71122+ atomic_read_unchecked(&fscache_n_checkaux_none),
71123+ atomic_read_unchecked(&fscache_n_checkaux_okay),
71124+ atomic_read_unchecked(&fscache_n_checkaux_update),
71125+ atomic_read_unchecked(&fscache_n_checkaux_obsolete));
71126
71127 seq_printf(m, "Pages : mrk=%u unc=%u\n",
71128- atomic_read(&fscache_n_marks),
71129- atomic_read(&fscache_n_uncaches));
71130+ atomic_read_unchecked(&fscache_n_marks),
71131+ atomic_read_unchecked(&fscache_n_uncaches));
71132
71133 seq_printf(m, "Acquire: n=%u nul=%u noc=%u ok=%u nbf=%u"
71134 " oom=%u\n",
71135- atomic_read(&fscache_n_acquires),
71136- atomic_read(&fscache_n_acquires_null),
71137- atomic_read(&fscache_n_acquires_no_cache),
71138- atomic_read(&fscache_n_acquires_ok),
71139- atomic_read(&fscache_n_acquires_nobufs),
71140- atomic_read(&fscache_n_acquires_oom));
71141+ atomic_read_unchecked(&fscache_n_acquires),
71142+ atomic_read_unchecked(&fscache_n_acquires_null),
71143+ atomic_read_unchecked(&fscache_n_acquires_no_cache),
71144+ atomic_read_unchecked(&fscache_n_acquires_ok),
71145+ atomic_read_unchecked(&fscache_n_acquires_nobufs),
71146+ atomic_read_unchecked(&fscache_n_acquires_oom));
71147
71148 seq_printf(m, "Lookups: n=%u neg=%u pos=%u crt=%u tmo=%u\n",
71149- atomic_read(&fscache_n_object_lookups),
71150- atomic_read(&fscache_n_object_lookups_negative),
71151- atomic_read(&fscache_n_object_lookups_positive),
71152- atomic_read(&fscache_n_object_created),
71153- atomic_read(&fscache_n_object_lookups_timed_out));
71154+ atomic_read_unchecked(&fscache_n_object_lookups),
71155+ atomic_read_unchecked(&fscache_n_object_lookups_negative),
71156+ atomic_read_unchecked(&fscache_n_object_lookups_positive),
71157+ atomic_read_unchecked(&fscache_n_object_created),
71158+ atomic_read_unchecked(&fscache_n_object_lookups_timed_out));
71159
71160 seq_printf(m, "Invals : n=%u run=%u\n",
71161- atomic_read(&fscache_n_invalidates),
71162- atomic_read(&fscache_n_invalidates_run));
71163+ atomic_read_unchecked(&fscache_n_invalidates),
71164+ atomic_read_unchecked(&fscache_n_invalidates_run));
71165
71166 seq_printf(m, "Updates: n=%u nul=%u run=%u\n",
71167- atomic_read(&fscache_n_updates),
71168- atomic_read(&fscache_n_updates_null),
71169- atomic_read(&fscache_n_updates_run));
71170+ atomic_read_unchecked(&fscache_n_updates),
71171+ atomic_read_unchecked(&fscache_n_updates_null),
71172+ atomic_read_unchecked(&fscache_n_updates_run));
71173
71174 seq_printf(m, "Relinqs: n=%u nul=%u wcr=%u rtr=%u\n",
71175- atomic_read(&fscache_n_relinquishes),
71176- atomic_read(&fscache_n_relinquishes_null),
71177- atomic_read(&fscache_n_relinquishes_waitcrt),
71178- atomic_read(&fscache_n_relinquishes_retire));
71179+ atomic_read_unchecked(&fscache_n_relinquishes),
71180+ atomic_read_unchecked(&fscache_n_relinquishes_null),
71181+ atomic_read_unchecked(&fscache_n_relinquishes_waitcrt),
71182+ atomic_read_unchecked(&fscache_n_relinquishes_retire));
71183
71184 seq_printf(m, "AttrChg: n=%u ok=%u nbf=%u oom=%u run=%u\n",
71185- atomic_read(&fscache_n_attr_changed),
71186- atomic_read(&fscache_n_attr_changed_ok),
71187- atomic_read(&fscache_n_attr_changed_nobufs),
71188- atomic_read(&fscache_n_attr_changed_nomem),
71189- atomic_read(&fscache_n_attr_changed_calls));
71190+ atomic_read_unchecked(&fscache_n_attr_changed),
71191+ atomic_read_unchecked(&fscache_n_attr_changed_ok),
71192+ atomic_read_unchecked(&fscache_n_attr_changed_nobufs),
71193+ atomic_read_unchecked(&fscache_n_attr_changed_nomem),
71194+ atomic_read_unchecked(&fscache_n_attr_changed_calls));
71195
71196 seq_printf(m, "Allocs : n=%u ok=%u wt=%u nbf=%u int=%u\n",
71197- atomic_read(&fscache_n_allocs),
71198- atomic_read(&fscache_n_allocs_ok),
71199- atomic_read(&fscache_n_allocs_wait),
71200- atomic_read(&fscache_n_allocs_nobufs),
71201- atomic_read(&fscache_n_allocs_intr));
71202+ atomic_read_unchecked(&fscache_n_allocs),
71203+ atomic_read_unchecked(&fscache_n_allocs_ok),
71204+ atomic_read_unchecked(&fscache_n_allocs_wait),
71205+ atomic_read_unchecked(&fscache_n_allocs_nobufs),
71206+ atomic_read_unchecked(&fscache_n_allocs_intr));
71207 seq_printf(m, "Allocs : ops=%u owt=%u abt=%u\n",
71208- atomic_read(&fscache_n_alloc_ops),
71209- atomic_read(&fscache_n_alloc_op_waits),
71210- atomic_read(&fscache_n_allocs_object_dead));
71211+ atomic_read_unchecked(&fscache_n_alloc_ops),
71212+ atomic_read_unchecked(&fscache_n_alloc_op_waits),
71213+ atomic_read_unchecked(&fscache_n_allocs_object_dead));
71214
71215 seq_printf(m, "Retrvls: n=%u ok=%u wt=%u nod=%u nbf=%u"
71216 " int=%u oom=%u\n",
71217- atomic_read(&fscache_n_retrievals),
71218- atomic_read(&fscache_n_retrievals_ok),
71219- atomic_read(&fscache_n_retrievals_wait),
71220- atomic_read(&fscache_n_retrievals_nodata),
71221- atomic_read(&fscache_n_retrievals_nobufs),
71222- atomic_read(&fscache_n_retrievals_intr),
71223- atomic_read(&fscache_n_retrievals_nomem));
71224+ atomic_read_unchecked(&fscache_n_retrievals),
71225+ atomic_read_unchecked(&fscache_n_retrievals_ok),
71226+ atomic_read_unchecked(&fscache_n_retrievals_wait),
71227+ atomic_read_unchecked(&fscache_n_retrievals_nodata),
71228+ atomic_read_unchecked(&fscache_n_retrievals_nobufs),
71229+ atomic_read_unchecked(&fscache_n_retrievals_intr),
71230+ atomic_read_unchecked(&fscache_n_retrievals_nomem));
71231 seq_printf(m, "Retrvls: ops=%u owt=%u abt=%u\n",
71232- atomic_read(&fscache_n_retrieval_ops),
71233- atomic_read(&fscache_n_retrieval_op_waits),
71234- atomic_read(&fscache_n_retrievals_object_dead));
71235+ atomic_read_unchecked(&fscache_n_retrieval_ops),
71236+ atomic_read_unchecked(&fscache_n_retrieval_op_waits),
71237+ atomic_read_unchecked(&fscache_n_retrievals_object_dead));
71238
71239 seq_printf(m, "Stores : n=%u ok=%u agn=%u nbf=%u oom=%u\n",
71240- atomic_read(&fscache_n_stores),
71241- atomic_read(&fscache_n_stores_ok),
71242- atomic_read(&fscache_n_stores_again),
71243- atomic_read(&fscache_n_stores_nobufs),
71244- atomic_read(&fscache_n_stores_oom));
71245+ atomic_read_unchecked(&fscache_n_stores),
71246+ atomic_read_unchecked(&fscache_n_stores_ok),
71247+ atomic_read_unchecked(&fscache_n_stores_again),
71248+ atomic_read_unchecked(&fscache_n_stores_nobufs),
71249+ atomic_read_unchecked(&fscache_n_stores_oom));
71250 seq_printf(m, "Stores : ops=%u run=%u pgs=%u rxd=%u olm=%u\n",
71251- atomic_read(&fscache_n_store_ops),
71252- atomic_read(&fscache_n_store_calls),
71253- atomic_read(&fscache_n_store_pages),
71254- atomic_read(&fscache_n_store_radix_deletes),
71255- atomic_read(&fscache_n_store_pages_over_limit));
71256+ atomic_read_unchecked(&fscache_n_store_ops),
71257+ atomic_read_unchecked(&fscache_n_store_calls),
71258+ atomic_read_unchecked(&fscache_n_store_pages),
71259+ atomic_read_unchecked(&fscache_n_store_radix_deletes),
71260+ atomic_read_unchecked(&fscache_n_store_pages_over_limit));
71261
71262 seq_printf(m, "VmScan : nos=%u gon=%u bsy=%u can=%u wt=%u\n",
71263- atomic_read(&fscache_n_store_vmscan_not_storing),
71264- atomic_read(&fscache_n_store_vmscan_gone),
71265- atomic_read(&fscache_n_store_vmscan_busy),
71266- atomic_read(&fscache_n_store_vmscan_cancelled),
71267- atomic_read(&fscache_n_store_vmscan_wait));
71268+ atomic_read_unchecked(&fscache_n_store_vmscan_not_storing),
71269+ atomic_read_unchecked(&fscache_n_store_vmscan_gone),
71270+ atomic_read_unchecked(&fscache_n_store_vmscan_busy),
71271+ atomic_read_unchecked(&fscache_n_store_vmscan_cancelled),
71272+ atomic_read_unchecked(&fscache_n_store_vmscan_wait));
71273
71274 seq_printf(m, "Ops : pend=%u run=%u enq=%u can=%u rej=%u\n",
71275- atomic_read(&fscache_n_op_pend),
71276- atomic_read(&fscache_n_op_run),
71277- atomic_read(&fscache_n_op_enqueue),
71278- atomic_read(&fscache_n_op_cancelled),
71279- atomic_read(&fscache_n_op_rejected));
71280+ atomic_read_unchecked(&fscache_n_op_pend),
71281+ atomic_read_unchecked(&fscache_n_op_run),
71282+ atomic_read_unchecked(&fscache_n_op_enqueue),
71283+ atomic_read_unchecked(&fscache_n_op_cancelled),
71284+ atomic_read_unchecked(&fscache_n_op_rejected));
71285 seq_printf(m, "Ops : dfr=%u rel=%u gc=%u\n",
71286- atomic_read(&fscache_n_op_deferred_release),
71287- atomic_read(&fscache_n_op_release),
71288- atomic_read(&fscache_n_op_gc));
71289+ atomic_read_unchecked(&fscache_n_op_deferred_release),
71290+ atomic_read_unchecked(&fscache_n_op_release),
71291+ atomic_read_unchecked(&fscache_n_op_gc));
71292
71293 seq_printf(m, "CacheOp: alo=%d luo=%d luc=%d gro=%d\n",
71294 atomic_read(&fscache_n_cop_alloc_object),
71295diff --git a/fs/fuse/cuse.c b/fs/fuse/cuse.c
71296index 28d0c7a..04816b7 100644
71297--- a/fs/fuse/cuse.c
71298+++ b/fs/fuse/cuse.c
71299@@ -611,10 +611,12 @@ static int __init cuse_init(void)
71300 INIT_LIST_HEAD(&cuse_conntbl[i]);
71301
71302 /* inherit and extend fuse_dev_operations */
71303- cuse_channel_fops = fuse_dev_operations;
71304- cuse_channel_fops.owner = THIS_MODULE;
71305- cuse_channel_fops.open = cuse_channel_open;
71306- cuse_channel_fops.release = cuse_channel_release;
71307+ pax_open_kernel();
71308+ memcpy((void *)&cuse_channel_fops, &fuse_dev_operations, sizeof(fuse_dev_operations));
71309+ *(void **)&cuse_channel_fops.owner = THIS_MODULE;
71310+ *(void **)&cuse_channel_fops.open = cuse_channel_open;
71311+ *(void **)&cuse_channel_fops.release = cuse_channel_release;
71312+ pax_close_kernel();
71313
71314 cuse_class = class_create(THIS_MODULE, "cuse");
71315 if (IS_ERR(cuse_class))
71316diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
71317index 39706c5..a803c71 100644
71318--- a/fs/fuse/dev.c
71319+++ b/fs/fuse/dev.c
71320@@ -1405,7 +1405,7 @@ static ssize_t fuse_dev_splice_read(struct file *in, loff_t *ppos,
71321 ret = 0;
71322 pipe_lock(pipe);
71323
71324- if (!pipe->readers) {
71325+ if (!atomic_read(&pipe->readers)) {
71326 send_sig(SIGPIPE, current, 0);
71327 if (!ret)
71328 ret = -EPIPE;
71329@@ -1434,7 +1434,7 @@ static ssize_t fuse_dev_splice_read(struct file *in, loff_t *ppos,
71330 page_nr++;
71331 ret += buf->len;
71332
71333- if (pipe->files)
71334+ if (atomic_read(&pipe->files))
71335 do_wakeup = 1;
71336 }
71337
71338diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
71339index 1545b71..7fabe47 100644
71340--- a/fs/fuse/dir.c
71341+++ b/fs/fuse/dir.c
71342@@ -1394,7 +1394,7 @@ static char *read_link(struct dentry *dentry)
71343 return link;
71344 }
71345
71346-static void free_link(char *link)
71347+static void free_link(const char *link)
71348 {
71349 if (!IS_ERR(link))
71350 free_page((unsigned long) link);
71351diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
71352index f42dffb..4a4c435 100644
71353--- a/fs/gfs2/glock.c
71354+++ b/fs/gfs2/glock.c
71355@@ -385,9 +385,9 @@ static void state_change(struct gfs2_glock *gl, unsigned int new_state)
71356 if (held1 != held2) {
71357 GLOCK_BUG_ON(gl, __lockref_is_dead(&gl->gl_lockref));
71358 if (held2)
71359- gl->gl_lockref.count++;
71360+ __lockref_inc(&gl->gl_lockref);
71361 else
71362- gl->gl_lockref.count--;
71363+ __lockref_dec(&gl->gl_lockref);
71364 }
71365 if (held1 && held2 && list_empty(&gl->gl_holders))
71366 clear_bit(GLF_QUEUED, &gl->gl_flags);
71367@@ -614,9 +614,9 @@ out:
71368 out_sched:
71369 clear_bit(GLF_LOCK, &gl->gl_flags);
71370 smp_mb__after_atomic();
71371- gl->gl_lockref.count++;
71372+ __lockref_inc(&gl->gl_lockref);
71373 if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
71374- gl->gl_lockref.count--;
71375+ __lockref_dec(&gl->gl_lockref);
71376 return;
71377
71378 out_unlock:
71379@@ -742,7 +742,7 @@ int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
71380 gl->gl_sbd = sdp;
71381 gl->gl_flags = 0;
71382 gl->gl_name = name;
71383- gl->gl_lockref.count = 1;
71384+ __lockref_set(&gl->gl_lockref, 1);
71385 gl->gl_state = LM_ST_UNLOCKED;
71386 gl->gl_target = LM_ST_UNLOCKED;
71387 gl->gl_demote_state = LM_ST_EXCLUSIVE;
71388@@ -1020,9 +1020,9 @@ int gfs2_glock_nq(struct gfs2_holder *gh)
71389 if (unlikely((LM_FLAG_NOEXP & gh->gh_flags) &&
71390 test_and_clear_bit(GLF_FROZEN, &gl->gl_flags))) {
71391 set_bit(GLF_REPLY_PENDING, &gl->gl_flags);
71392- gl->gl_lockref.count++;
71393+ __lockref_inc(&gl->gl_lockref);
71394 if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
71395- gl->gl_lockref.count--;
71396+ __lockref_dec(&gl->gl_lockref);
71397 }
71398 run_queue(gl, 1);
71399 spin_unlock(&gl->gl_spin);
71400@@ -1325,7 +1325,7 @@ void gfs2_glock_complete(struct gfs2_glock *gl, int ret)
71401 }
71402 }
71403
71404- gl->gl_lockref.count++;
71405+ __lockref_inc(&gl->gl_lockref);
71406 set_bit(GLF_REPLY_PENDING, &gl->gl_flags);
71407 spin_unlock(&gl->gl_spin);
71408
71409@@ -1384,12 +1384,12 @@ add_back_to_lru:
71410 goto add_back_to_lru;
71411 }
71412 clear_bit(GLF_LRU, &gl->gl_flags);
71413- gl->gl_lockref.count++;
71414+ __lockref_inc(&gl->gl_lockref);
71415 if (demote_ok(gl))
71416 handle_callback(gl, LM_ST_UNLOCKED, 0, false);
71417 WARN_ON(!test_and_clear_bit(GLF_LOCK, &gl->gl_flags));
71418 if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
71419- gl->gl_lockref.count--;
71420+ __lockref_dec(&gl->gl_lockref);
71421 spin_unlock(&gl->gl_spin);
71422 cond_resched_lock(&lru_lock);
71423 }
71424@@ -1719,7 +1719,7 @@ void gfs2_dump_glock(struct seq_file *seq, const struct gfs2_glock *gl)
71425 state2str(gl->gl_demote_state), dtime,
71426 atomic_read(&gl->gl_ail_count),
71427 atomic_read(&gl->gl_revokes),
71428- (int)gl->gl_lockref.count, gl->gl_hold_time);
71429+ __lockref_read(&gl->gl_lockref), gl->gl_hold_time);
71430
71431 list_for_each_entry(gh, &gl->gl_holders, gh_list)
71432 dump_holder(seq, gh);
71433diff --git a/fs/gfs2/glops.c b/fs/gfs2/glops.c
71434index fe91951..ce38a6e 100644
71435--- a/fs/gfs2/glops.c
71436+++ b/fs/gfs2/glops.c
71437@@ -544,9 +544,9 @@ static void iopen_go_callback(struct gfs2_glock *gl, bool remote)
71438
71439 if (gl->gl_demote_state == LM_ST_UNLOCKED &&
71440 gl->gl_state == LM_ST_SHARED && ip) {
71441- gl->gl_lockref.count++;
71442+ __lockref_inc(&gl->gl_lockref);
71443 if (queue_work(gfs2_delete_workqueue, &gl->gl_delete) == 0)
71444- gl->gl_lockref.count--;
71445+ __lockref_dec(&gl->gl_lockref);
71446 }
71447 }
71448
71449diff --git a/fs/gfs2/quota.c b/fs/gfs2/quota.c
71450index 3aa17d4..b338075 100644
71451--- a/fs/gfs2/quota.c
71452+++ b/fs/gfs2/quota.c
71453@@ -154,7 +154,7 @@ static enum lru_status gfs2_qd_isolate(struct list_head *item,
71454 if (!spin_trylock(&qd->qd_lockref.lock))
71455 return LRU_SKIP;
71456
71457- if (qd->qd_lockref.count == 0) {
71458+ if (__lockref_read(&qd->qd_lockref) == 0) {
71459 lockref_mark_dead(&qd->qd_lockref);
71460 list_lru_isolate_move(lru, &qd->qd_lru, dispose);
71461 }
71462@@ -221,7 +221,7 @@ static struct gfs2_quota_data *qd_alloc(unsigned hash, struct gfs2_sbd *sdp, str
71463 return NULL;
71464
71465 qd->qd_sbd = sdp;
71466- qd->qd_lockref.count = 1;
71467+ __lockref_set(&qd->qd_lockref, 1);
71468 spin_lock_init(&qd->qd_lockref.lock);
71469 qd->qd_id = qid;
71470 qd->qd_slot = -1;
71471@@ -312,7 +312,7 @@ static void qd_put(struct gfs2_quota_data *qd)
71472 if (lockref_put_or_lock(&qd->qd_lockref))
71473 return;
71474
71475- qd->qd_lockref.count = 0;
71476+ __lockref_set(&qd->qd_lockref, 0);
71477 list_lru_add(&gfs2_qd_lru, &qd->qd_lru);
71478 spin_unlock(&qd->qd_lockref.lock);
71479
71480diff --git a/fs/hostfs/hostfs_kern.c b/fs/hostfs/hostfs_kern.c
71481index fd62cae..3494dfa 100644
71482--- a/fs/hostfs/hostfs_kern.c
71483+++ b/fs/hostfs/hostfs_kern.c
71484@@ -908,7 +908,7 @@ static void *hostfs_follow_link(struct dentry *dentry, struct nameidata *nd)
71485
71486 static void hostfs_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
71487 {
71488- char *s = nd_get_link(nd);
71489+ const char *s = nd_get_link(nd);
71490 if (!IS_ERR(s))
71491 __putname(s);
71492 }
71493diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
71494index c274aca..772fa5e 100644
71495--- a/fs/hugetlbfs/inode.c
71496+++ b/fs/hugetlbfs/inode.c
71497@@ -148,6 +148,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
71498 struct mm_struct *mm = current->mm;
71499 struct vm_area_struct *vma;
71500 struct hstate *h = hstate_file(file);
71501+ unsigned long offset = gr_rand_threadstack_offset(mm, file, flags);
71502 struct vm_unmapped_area_info info;
71503
71504 if (len & ~huge_page_mask(h))
71505@@ -161,17 +162,26 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
71506 return addr;
71507 }
71508
71509+#ifdef CONFIG_PAX_RANDMMAP
71510+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
71511+#endif
71512+
71513 if (addr) {
71514 addr = ALIGN(addr, huge_page_size(h));
71515 vma = find_vma(mm, addr);
71516- if (TASK_SIZE - len >= addr &&
71517- (!vma || addr + len <= vma->vm_start))
71518+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
71519 return addr;
71520 }
71521
71522 info.flags = 0;
71523 info.length = len;
71524 info.low_limit = TASK_UNMAPPED_BASE;
71525+
71526+#ifdef CONFIG_PAX_RANDMMAP
71527+ if (mm->pax_flags & MF_PAX_RANDMMAP)
71528+ info.low_limit += mm->delta_mmap;
71529+#endif
71530+
71531 info.high_limit = TASK_SIZE;
71532 info.align_mask = PAGE_MASK & ~huge_page_mask(h);
71533 info.align_offset = 0;
71534@@ -912,7 +922,7 @@ static struct file_system_type hugetlbfs_fs_type = {
71535 };
71536 MODULE_ALIAS_FS("hugetlbfs");
71537
71538-static struct vfsmount *hugetlbfs_vfsmount[HUGE_MAX_HSTATE];
71539+struct vfsmount *hugetlbfs_vfsmount[HUGE_MAX_HSTATE];
71540
71541 static int can_do_hugetlb_shm(void)
71542 {
71543diff --git a/fs/inode.c b/fs/inode.c
71544index f00b16f..b653fea 100644
71545--- a/fs/inode.c
71546+++ b/fs/inode.c
71547@@ -830,16 +830,20 @@ unsigned int get_next_ino(void)
71548 unsigned int *p = &get_cpu_var(last_ino);
71549 unsigned int res = *p;
71550
71551+start:
71552+
71553 #ifdef CONFIG_SMP
71554 if (unlikely((res & (LAST_INO_BATCH-1)) == 0)) {
71555- static atomic_t shared_last_ino;
71556- int next = atomic_add_return(LAST_INO_BATCH, &shared_last_ino);
71557+ static atomic_unchecked_t shared_last_ino;
71558+ int next = atomic_add_return_unchecked(LAST_INO_BATCH, &shared_last_ino);
71559
71560 res = next - LAST_INO_BATCH;
71561 }
71562 #endif
71563
71564- *p = ++res;
71565+ if (unlikely(!++res))
71566+ goto start; /* never zero */
71567+ *p = res;
71568 put_cpu_var(last_ino);
71569 return res;
71570 }
71571diff --git a/fs/jffs2/erase.c b/fs/jffs2/erase.c
71572index 4a6cf28..d3a29d3 100644
71573--- a/fs/jffs2/erase.c
71574+++ b/fs/jffs2/erase.c
71575@@ -452,7 +452,8 @@ static void jffs2_mark_erased_block(struct jffs2_sb_info *c, struct jffs2_eraseb
71576 struct jffs2_unknown_node marker = {
71577 .magic = cpu_to_je16(JFFS2_MAGIC_BITMASK),
71578 .nodetype = cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
71579- .totlen = cpu_to_je32(c->cleanmarker_size)
71580+ .totlen = cpu_to_je32(c->cleanmarker_size),
71581+ .hdr_crc = cpu_to_je32(0)
71582 };
71583
71584 jffs2_prealloc_raw_node_refs(c, jeb, 1);
71585diff --git a/fs/jffs2/wbuf.c b/fs/jffs2/wbuf.c
71586index 09ed551..45684f8 100644
71587--- a/fs/jffs2/wbuf.c
71588+++ b/fs/jffs2/wbuf.c
71589@@ -1023,7 +1023,8 @@ static const struct jffs2_unknown_node oob_cleanmarker =
71590 {
71591 .magic = constant_cpu_to_je16(JFFS2_MAGIC_BITMASK),
71592 .nodetype = constant_cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
71593- .totlen = constant_cpu_to_je32(8)
71594+ .totlen = constant_cpu_to_je32(8),
71595+ .hdr_crc = constant_cpu_to_je32(0)
71596 };
71597
71598 /*
71599diff --git a/fs/jfs/super.c b/fs/jfs/super.c
71600index 5d30c56..8c45372 100644
71601--- a/fs/jfs/super.c
71602+++ b/fs/jfs/super.c
71603@@ -901,7 +901,7 @@ static int __init init_jfs_fs(void)
71604
71605 jfs_inode_cachep =
71606 kmem_cache_create("jfs_ip", sizeof(struct jfs_inode_info), 0,
71607- SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD,
71608+ SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD|SLAB_USERCOPY,
71609 init_once);
71610 if (jfs_inode_cachep == NULL)
71611 return -ENOMEM;
71612diff --git a/fs/kernfs/dir.c b/fs/kernfs/dir.c
71613index 6acc964..eca491f 100644
71614--- a/fs/kernfs/dir.c
71615+++ b/fs/kernfs/dir.c
71616@@ -182,7 +182,7 @@ struct kernfs_node *kernfs_get_parent(struct kernfs_node *kn)
71617 *
71618 * Returns 31 bit hash of ns + name (so it fits in an off_t )
71619 */
71620-static unsigned int kernfs_name_hash(const char *name, const void *ns)
71621+static unsigned int kernfs_name_hash(const unsigned char *name, const void *ns)
71622 {
71623 unsigned long hash = init_name_hash();
71624 unsigned int len = strlen(name);
71625@@ -831,6 +831,12 @@ static int kernfs_iop_mkdir(struct inode *dir, struct dentry *dentry,
71626 ret = scops->mkdir(parent, dentry->d_name.name, mode);
71627
71628 kernfs_put_active(parent);
71629+
71630+ if (!ret) {
71631+ struct dentry *dentry_ret = kernfs_iop_lookup(dir, dentry, 0);
71632+ ret = PTR_ERR_OR_ZERO(dentry_ret);
71633+ }
71634+
71635 return ret;
71636 }
71637
71638diff --git a/fs/kernfs/file.c b/fs/kernfs/file.c
71639index 2bacb99..f745182 100644
71640--- a/fs/kernfs/file.c
71641+++ b/fs/kernfs/file.c
71642@@ -34,7 +34,7 @@ static DEFINE_MUTEX(kernfs_open_file_mutex);
71643
71644 struct kernfs_open_node {
71645 atomic_t refcnt;
71646- atomic_t event;
71647+ atomic_unchecked_t event;
71648 wait_queue_head_t poll;
71649 struct list_head files; /* goes through kernfs_open_file.list */
71650 };
71651@@ -163,7 +163,7 @@ static int kernfs_seq_show(struct seq_file *sf, void *v)
71652 {
71653 struct kernfs_open_file *of = sf->private;
71654
71655- of->event = atomic_read(&of->kn->attr.open->event);
71656+ of->event = atomic_read_unchecked(&of->kn->attr.open->event);
71657
71658 return of->kn->attr.ops->seq_show(sf, v);
71659 }
71660@@ -207,7 +207,7 @@ static ssize_t kernfs_file_direct_read(struct kernfs_open_file *of,
71661 goto out_free;
71662 }
71663
71664- of->event = atomic_read(&of->kn->attr.open->event);
71665+ of->event = atomic_read_unchecked(&of->kn->attr.open->event);
71666 ops = kernfs_ops(of->kn);
71667 if (ops->read)
71668 len = ops->read(of, buf, len, *ppos);
71669@@ -272,7 +272,7 @@ static ssize_t kernfs_fop_write(struct file *file, const char __user *user_buf,
71670 {
71671 struct kernfs_open_file *of = kernfs_of(file);
71672 const struct kernfs_ops *ops;
71673- size_t len;
71674+ ssize_t len;
71675 char *buf;
71676
71677 if (of->atomic_write_len) {
71678@@ -385,12 +385,12 @@ static int kernfs_vma_page_mkwrite(struct vm_area_struct *vma,
71679 return ret;
71680 }
71681
71682-static int kernfs_vma_access(struct vm_area_struct *vma, unsigned long addr,
71683- void *buf, int len, int write)
71684+static ssize_t kernfs_vma_access(struct vm_area_struct *vma, unsigned long addr,
71685+ void *buf, size_t len, int write)
71686 {
71687 struct file *file = vma->vm_file;
71688 struct kernfs_open_file *of = kernfs_of(file);
71689- int ret;
71690+ ssize_t ret;
71691
71692 if (!of->vm_ops)
71693 return -EINVAL;
71694@@ -569,7 +569,7 @@ static int kernfs_get_open_node(struct kernfs_node *kn,
71695 return -ENOMEM;
71696
71697 atomic_set(&new_on->refcnt, 0);
71698- atomic_set(&new_on->event, 1);
71699+ atomic_set_unchecked(&new_on->event, 1);
71700 init_waitqueue_head(&new_on->poll);
71701 INIT_LIST_HEAD(&new_on->files);
71702 goto retry;
71703@@ -793,7 +793,7 @@ static unsigned int kernfs_fop_poll(struct file *filp, poll_table *wait)
71704
71705 kernfs_put_active(kn);
71706
71707- if (of->event != atomic_read(&on->event))
71708+ if (of->event != atomic_read_unchecked(&on->event))
71709 goto trigger;
71710
71711 return DEFAULT_POLLMASK;
71712@@ -824,7 +824,7 @@ repeat:
71713
71714 on = kn->attr.open;
71715 if (on) {
71716- atomic_inc(&on->event);
71717+ atomic_inc_unchecked(&on->event);
71718 wake_up_interruptible(&on->poll);
71719 }
71720
71721diff --git a/fs/kernfs/symlink.c b/fs/kernfs/symlink.c
71722index 8a19889..4c3069a 100644
71723--- a/fs/kernfs/symlink.c
71724+++ b/fs/kernfs/symlink.c
71725@@ -128,7 +128,7 @@ static void *kernfs_iop_follow_link(struct dentry *dentry, struct nameidata *nd)
71726 static void kernfs_iop_put_link(struct dentry *dentry, struct nameidata *nd,
71727 void *cookie)
71728 {
71729- char *page = nd_get_link(nd);
71730+ const char *page = nd_get_link(nd);
71731 if (!IS_ERR(page))
71732 free_page((unsigned long)page);
71733 }
71734diff --git a/fs/libfs.c b/fs/libfs.c
71735index 0ab6512..cd9982d 100644
71736--- a/fs/libfs.c
71737+++ b/fs/libfs.c
71738@@ -160,6 +160,9 @@ int dcache_readdir(struct file *file, struct dir_context *ctx)
71739
71740 for (p = q->next; p != &dentry->d_subdirs; p = p->next) {
71741 struct dentry *next = list_entry(p, struct dentry, d_child);
71742+ char d_name[sizeof(next->d_iname)];
71743+ const unsigned char *name;
71744+
71745 spin_lock_nested(&next->d_lock, DENTRY_D_LOCK_NESTED);
71746 if (!simple_positive(next)) {
71747 spin_unlock(&next->d_lock);
71748@@ -168,7 +171,12 @@ int dcache_readdir(struct file *file, struct dir_context *ctx)
71749
71750 spin_unlock(&next->d_lock);
71751 spin_unlock(&dentry->d_lock);
71752- if (!dir_emit(ctx, next->d_name.name, next->d_name.len,
71753+ name = next->d_name.name;
71754+ if (name == next->d_iname) {
71755+ memcpy(d_name, name, next->d_name.len);
71756+ name = d_name;
71757+ }
71758+ if (!dir_emit(ctx, name, next->d_name.len,
71759 next->d_inode->i_ino, dt_type(next->d_inode)))
71760 return 0;
71761 spin_lock(&dentry->d_lock);
71762@@ -1027,7 +1035,7 @@ EXPORT_SYMBOL(noop_fsync);
71763 void kfree_put_link(struct dentry *dentry, struct nameidata *nd,
71764 void *cookie)
71765 {
71766- char *s = nd_get_link(nd);
71767+ const char *s = nd_get_link(nd);
71768 if (!IS_ERR(s))
71769 kfree(s);
71770 }
71771diff --git a/fs/lockd/clntproc.c b/fs/lockd/clntproc.c
71772index acd3947..1f896e2 100644
71773--- a/fs/lockd/clntproc.c
71774+++ b/fs/lockd/clntproc.c
71775@@ -36,11 +36,11 @@ static const struct rpc_call_ops nlmclnt_cancel_ops;
71776 /*
71777 * Cookie counter for NLM requests
71778 */
71779-static atomic_t nlm_cookie = ATOMIC_INIT(0x1234);
71780+static atomic_unchecked_t nlm_cookie = ATOMIC_INIT(0x1234);
71781
71782 void nlmclnt_next_cookie(struct nlm_cookie *c)
71783 {
71784- u32 cookie = atomic_inc_return(&nlm_cookie);
71785+ u32 cookie = atomic_inc_return_unchecked(&nlm_cookie);
71786
71787 memcpy(c->data, &cookie, 4);
71788 c->len=4;
71789diff --git a/fs/mount.h b/fs/mount.h
71790index 6a61c2b..bd79179 100644
71791--- a/fs/mount.h
71792+++ b/fs/mount.h
71793@@ -13,7 +13,7 @@ struct mnt_namespace {
71794 u64 seq; /* Sequence number to prevent loops */
71795 wait_queue_head_t poll;
71796 u64 event;
71797-};
71798+} __randomize_layout;
71799
71800 struct mnt_pcp {
71801 int mnt_count;
71802@@ -65,7 +65,7 @@ struct mount {
71803 struct hlist_head mnt_pins;
71804 struct fs_pin mnt_umount;
71805 struct dentry *mnt_ex_mountpoint;
71806-};
71807+} __randomize_layout;
71808
71809 #define MNT_NS_INTERNAL ERR_PTR(-EINVAL) /* distinct from any mnt_namespace */
71810
71811diff --git a/fs/namei.c b/fs/namei.c
71812index 50a8583..44c470a 100644
71813--- a/fs/namei.c
71814+++ b/fs/namei.c
71815@@ -337,17 +337,32 @@ int generic_permission(struct inode *inode, int mask)
71816 if (ret != -EACCES)
71817 return ret;
71818
71819+#ifdef CONFIG_GRKERNSEC
71820+ /* we'll block if we have to log due to a denied capability use */
71821+ if (mask & MAY_NOT_BLOCK)
71822+ return -ECHILD;
71823+#endif
71824+
71825 if (S_ISDIR(inode->i_mode)) {
71826 /* DACs are overridable for directories */
71827- if (capable_wrt_inode_uidgid(inode, CAP_DAC_OVERRIDE))
71828- return 0;
71829 if (!(mask & MAY_WRITE))
71830- if (capable_wrt_inode_uidgid(inode,
71831- CAP_DAC_READ_SEARCH))
71832+ if (capable_wrt_inode_uidgid_nolog(inode, CAP_DAC_OVERRIDE) ||
71833+ capable_wrt_inode_uidgid(inode, CAP_DAC_READ_SEARCH))
71834 return 0;
71835+ if (capable_wrt_inode_uidgid(inode, CAP_DAC_OVERRIDE))
71836+ return 0;
71837 return -EACCES;
71838 }
71839 /*
71840+ * Searching includes executable on directories, else just read.
71841+ */
71842+ mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
71843+ if (mask == MAY_READ)
71844+ if (capable_wrt_inode_uidgid_nolog(inode, CAP_DAC_OVERRIDE) ||
71845+ capable_wrt_inode_uidgid(inode, CAP_DAC_READ_SEARCH))
71846+ return 0;
71847+
71848+ /*
71849 * Read/write DACs are always overridable.
71850 * Executable DACs are overridable when there is
71851 * at least one exec bit set.
71852@@ -356,14 +371,6 @@ int generic_permission(struct inode *inode, int mask)
71853 if (capable_wrt_inode_uidgid(inode, CAP_DAC_OVERRIDE))
71854 return 0;
71855
71856- /*
71857- * Searching includes executable on directories, else just read.
71858- */
71859- mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
71860- if (mask == MAY_READ)
71861- if (capable_wrt_inode_uidgid(inode, CAP_DAC_READ_SEARCH))
71862- return 0;
71863-
71864 return -EACCES;
71865 }
71866 EXPORT_SYMBOL(generic_permission);
71867@@ -503,7 +510,7 @@ struct nameidata {
71868 int last_type;
71869 unsigned depth;
71870 struct file *base;
71871- char *saved_names[MAX_NESTED_LINKS + 1];
71872+ const char *saved_names[MAX_NESTED_LINKS + 1];
71873 };
71874
71875 /*
71876@@ -714,13 +721,13 @@ void nd_jump_link(struct nameidata *nd, struct path *path)
71877 nd->flags |= LOOKUP_JUMPED;
71878 }
71879
71880-void nd_set_link(struct nameidata *nd, char *path)
71881+void nd_set_link(struct nameidata *nd, const char *path)
71882 {
71883 nd->saved_names[nd->depth] = path;
71884 }
71885 EXPORT_SYMBOL(nd_set_link);
71886
71887-char *nd_get_link(struct nameidata *nd)
71888+const char *nd_get_link(const struct nameidata *nd)
71889 {
71890 return nd->saved_names[nd->depth];
71891 }
71892@@ -855,7 +862,7 @@ follow_link(struct path *link, struct nameidata *nd, void **p)
71893 {
71894 struct dentry *dentry = link->dentry;
71895 int error;
71896- char *s;
71897+ const char *s;
71898
71899 BUG_ON(nd->flags & LOOKUP_RCU);
71900
71901@@ -876,6 +883,12 @@ follow_link(struct path *link, struct nameidata *nd, void **p)
71902 if (error)
71903 goto out_put_nd_path;
71904
71905+ if (gr_handle_follow_link(dentry->d_parent->d_inode,
71906+ dentry->d_inode, dentry, nd->path.mnt)) {
71907+ error = -EACCES;
71908+ goto out_put_nd_path;
71909+ }
71910+
71911 nd->last_type = LAST_BIND;
71912 *p = dentry->d_inode->i_op->follow_link(dentry, nd);
71913 error = PTR_ERR(*p);
71914@@ -1640,6 +1653,8 @@ static inline int nested_symlink(struct path *path, struct nameidata *nd)
71915 if (res)
71916 break;
71917 res = walk_component(nd, path, LOOKUP_FOLLOW);
71918+ if (res >= 0 && gr_handle_symlink_owner(&link, nd->inode))
71919+ res = -EACCES;
71920 put_link(nd, &link, cookie);
71921 } while (res > 0);
71922
71923@@ -1712,7 +1727,7 @@ EXPORT_SYMBOL(full_name_hash);
71924 static inline u64 hash_name(const char *name)
71925 {
71926 unsigned long a, b, adata, bdata, mask, hash, len;
71927- const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
71928+ static const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
71929
71930 hash = a = 0;
71931 len = -sizeof(unsigned long);
71932@@ -2007,6 +2022,8 @@ static int path_lookupat(int dfd, const char *name,
71933 if (err)
71934 break;
71935 err = lookup_last(nd, &path);
71936+ if (!err && gr_handle_symlink_owner(&link, nd->inode))
71937+ err = -EACCES;
71938 put_link(nd, &link, cookie);
71939 }
71940 }
71941@@ -2014,6 +2031,13 @@ static int path_lookupat(int dfd, const char *name,
71942 if (!err)
71943 err = complete_walk(nd);
71944
71945+ if (!err && !(nd->flags & LOOKUP_PARENT)) {
71946+ if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) {
71947+ path_put(&nd->path);
71948+ err = -ENOENT;
71949+ }
71950+ }
71951+
71952 if (!err && nd->flags & LOOKUP_DIRECTORY) {
71953 if (!d_can_lookup(nd->path.dentry)) {
71954 path_put(&nd->path);
71955@@ -2035,8 +2059,15 @@ static int filename_lookup(int dfd, struct filename *name,
71956 retval = path_lookupat(dfd, name->name,
71957 flags | LOOKUP_REVAL, nd);
71958
71959- if (likely(!retval))
71960+ if (likely(!retval)) {
71961 audit_inode(name, nd->path.dentry, flags & LOOKUP_PARENT);
71962+ if (name->name[0] != '/' && nd->path.dentry && nd->inode) {
71963+ if (!gr_chroot_fchdir(nd->path.dentry, nd->path.mnt)) {
71964+ path_put(&nd->path);
71965+ return -ENOENT;
71966+ }
71967+ }
71968+ }
71969 return retval;
71970 }
71971
71972@@ -2615,6 +2646,13 @@ static int may_open(struct path *path, int acc_mode, int flag)
71973 if (flag & O_NOATIME && !inode_owner_or_capable(inode))
71974 return -EPERM;
71975
71976+ if (gr_handle_rofs_blockwrite(dentry, path->mnt, acc_mode))
71977+ return -EPERM;
71978+ if (gr_handle_rawio(inode))
71979+ return -EPERM;
71980+ if (!gr_acl_handle_open(dentry, path->mnt, acc_mode))
71981+ return -EACCES;
71982+
71983 return 0;
71984 }
71985
71986@@ -2846,7 +2884,7 @@ looked_up:
71987 * cleared otherwise prior to returning.
71988 */
71989 static int lookup_open(struct nameidata *nd, struct path *path,
71990- struct file *file,
71991+ struct path *link, struct file *file,
71992 const struct open_flags *op,
71993 bool got_write, int *opened)
71994 {
71995@@ -2881,6 +2919,17 @@ static int lookup_open(struct nameidata *nd, struct path *path,
71996 /* Negative dentry, just create the file */
71997 if (!dentry->d_inode && (op->open_flag & O_CREAT)) {
71998 umode_t mode = op->mode;
71999+
72000+ if (link && gr_handle_symlink_owner(link, dir->d_inode)) {
72001+ error = -EACCES;
72002+ goto out_dput;
72003+ }
72004+
72005+ if (!gr_acl_handle_creat(dentry, dir, nd->path.mnt, op->open_flag, op->acc_mode, mode)) {
72006+ error = -EACCES;
72007+ goto out_dput;
72008+ }
72009+
72010 if (!IS_POSIXACL(dir->d_inode))
72011 mode &= ~current_umask();
72012 /*
72013@@ -2902,6 +2951,8 @@ static int lookup_open(struct nameidata *nd, struct path *path,
72014 nd->flags & LOOKUP_EXCL);
72015 if (error)
72016 goto out_dput;
72017+ else
72018+ gr_handle_create(dentry, nd->path.mnt);
72019 }
72020 out_no_open:
72021 path->dentry = dentry;
72022@@ -2916,7 +2967,7 @@ out_dput:
72023 /*
72024 * Handle the last step of open()
72025 */
72026-static int do_last(struct nameidata *nd, struct path *path,
72027+static int do_last(struct nameidata *nd, struct path *path, struct path *link,
72028 struct file *file, const struct open_flags *op,
72029 int *opened, struct filename *name)
72030 {
72031@@ -2966,6 +3017,15 @@ static int do_last(struct nameidata *nd, struct path *path,
72032 if (error)
72033 return error;
72034
72035+ if (!gr_acl_handle_hidden_file(dir, nd->path.mnt)) {
72036+ error = -ENOENT;
72037+ goto out;
72038+ }
72039+ if (link && gr_handle_symlink_owner(link, nd->inode)) {
72040+ error = -EACCES;
72041+ goto out;
72042+ }
72043+
72044 audit_inode(name, dir, LOOKUP_PARENT);
72045 error = -EISDIR;
72046 /* trailing slashes? */
72047@@ -2985,7 +3045,7 @@ retry_lookup:
72048 */
72049 }
72050 mutex_lock(&dir->d_inode->i_mutex);
72051- error = lookup_open(nd, path, file, op, got_write, opened);
72052+ error = lookup_open(nd, path, link, file, op, got_write, opened);
72053 mutex_unlock(&dir->d_inode->i_mutex);
72054
72055 if (error <= 0) {
72056@@ -3009,11 +3069,28 @@ retry_lookup:
72057 goto finish_open_created;
72058 }
72059
72060+ if (!gr_acl_handle_hidden_file(path->dentry, nd->path.mnt)) {
72061+ error = -ENOENT;
72062+ goto exit_dput;
72063+ }
72064+ if (link && gr_handle_symlink_owner(link, path->dentry->d_inode)) {
72065+ error = -EACCES;
72066+ goto exit_dput;
72067+ }
72068+
72069 /*
72070 * create/update audit record if it already exists.
72071 */
72072- if (d_is_positive(path->dentry))
72073+ if (d_is_positive(path->dentry)) {
72074+ /* only check if O_CREAT is specified, all other checks need to go
72075+ into may_open */
72076+ if (gr_handle_fifo(path->dentry, path->mnt, dir, open_flag, acc_mode)) {
72077+ error = -EACCES;
72078+ goto exit_dput;
72079+ }
72080+
72081 audit_inode(name, path->dentry, 0);
72082+ }
72083
72084 /*
72085 * If atomic_open() acquired write access it is dropped now due to
72086@@ -3055,6 +3132,11 @@ finish_lookup:
72087 }
72088 }
72089 BUG_ON(inode != path->dentry->d_inode);
72090+ /* if we're resolving a symlink to another symlink */
72091+ if (link && gr_handle_symlink_owner(link, inode)) {
72092+ error = -EACCES;
72093+ goto out;
72094+ }
72095 return 1;
72096 }
72097
72098@@ -3074,7 +3156,18 @@ finish_open:
72099 path_put(&save_parent);
72100 return error;
72101 }
72102+
72103+ if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) {
72104+ error = -ENOENT;
72105+ goto out;
72106+ }
72107+ if (link && gr_handle_symlink_owner(link, nd->inode)) {
72108+ error = -EACCES;
72109+ goto out;
72110+ }
72111+
72112 audit_inode(name, nd->path.dentry, 0);
72113+
72114 error = -EISDIR;
72115 if ((open_flag & O_CREAT) && d_is_dir(nd->path.dentry))
72116 goto out;
72117@@ -3235,7 +3328,7 @@ static struct file *path_openat(int dfd, struct filename *pathname,
72118 if (unlikely(error))
72119 goto out;
72120
72121- error = do_last(nd, &path, file, op, &opened, pathname);
72122+ error = do_last(nd, &path, NULL, file, op, &opened, pathname);
72123 while (unlikely(error > 0)) { /* trailing symlink */
72124 struct path link = path;
72125 void *cookie;
72126@@ -3253,7 +3346,7 @@ static struct file *path_openat(int dfd, struct filename *pathname,
72127 error = follow_link(&link, nd, &cookie);
72128 if (unlikely(error))
72129 break;
72130- error = do_last(nd, &path, file, op, &opened, pathname);
72131+ error = do_last(nd, &path, &link, file, op, &opened, pathname);
72132 put_link(nd, &link, cookie);
72133 }
72134 out:
72135@@ -3356,9 +3449,11 @@ static struct dentry *filename_create(int dfd, struct filename *name,
72136 goto unlock;
72137
72138 error = -EEXIST;
72139- if (d_is_positive(dentry))
72140+ if (d_is_positive(dentry)) {
72141+ if (!gr_acl_handle_hidden_file(dentry, nd.path.mnt))
72142+ error = -ENOENT;
72143 goto fail;
72144-
72145+ }
72146 /*
72147 * Special case - lookup gave negative, but... we had foo/bar/
72148 * From the vfs_mknod() POV we just have a negative dentry -
72149@@ -3423,6 +3518,20 @@ struct dentry *user_path_create(int dfd, const char __user *pathname,
72150 }
72151 EXPORT_SYMBOL(user_path_create);
72152
72153+static struct dentry *user_path_create_with_name(int dfd, const char __user *pathname, struct path *path, struct filename **to, unsigned int lookup_flags)
72154+{
72155+ struct filename *tmp = getname(pathname);
72156+ struct dentry *res;
72157+ if (IS_ERR(tmp))
72158+ return ERR_CAST(tmp);
72159+ res = kern_path_create(dfd, tmp->name, path, lookup_flags);
72160+ if (IS_ERR(res))
72161+ putname(tmp);
72162+ else
72163+ *to = tmp;
72164+ return res;
72165+}
72166+
72167 int vfs_mknod(struct inode *dir, struct dentry *dentry, umode_t mode, dev_t dev)
72168 {
72169 int error = may_create(dir, dentry);
72170@@ -3486,6 +3595,17 @@ retry:
72171
72172 if (!IS_POSIXACL(path.dentry->d_inode))
72173 mode &= ~current_umask();
72174+
72175+ if (gr_handle_chroot_mknod(dentry, path.mnt, mode)) {
72176+ error = -EPERM;
72177+ goto out;
72178+ }
72179+
72180+ if (!gr_acl_handle_mknod(dentry, path.dentry, path.mnt, mode)) {
72181+ error = -EACCES;
72182+ goto out;
72183+ }
72184+
72185 error = security_path_mknod(&path, dentry, mode, dev);
72186 if (error)
72187 goto out;
72188@@ -3501,6 +3621,8 @@ retry:
72189 error = vfs_mknod(path.dentry->d_inode,dentry,mode,0);
72190 break;
72191 }
72192+ if (!error)
72193+ gr_handle_create(dentry, path.mnt);
72194 out:
72195 done_path_create(&path, dentry);
72196 if (retry_estale(error, lookup_flags)) {
72197@@ -3555,9 +3677,16 @@ retry:
72198
72199 if (!IS_POSIXACL(path.dentry->d_inode))
72200 mode &= ~current_umask();
72201+ if (!gr_acl_handle_mkdir(dentry, path.dentry, path.mnt)) {
72202+ error = -EACCES;
72203+ goto out;
72204+ }
72205 error = security_path_mkdir(&path, dentry, mode);
72206 if (!error)
72207 error = vfs_mkdir(path.dentry->d_inode, dentry, mode);
72208+ if (!error)
72209+ gr_handle_create(dentry, path.mnt);
72210+out:
72211 done_path_create(&path, dentry);
72212 if (retry_estale(error, lookup_flags)) {
72213 lookup_flags |= LOOKUP_REVAL;
72214@@ -3590,7 +3719,7 @@ void dentry_unhash(struct dentry *dentry)
72215 {
72216 shrink_dcache_parent(dentry);
72217 spin_lock(&dentry->d_lock);
72218- if (dentry->d_lockref.count == 1)
72219+ if (__lockref_read(&dentry->d_lockref) == 1)
72220 __d_drop(dentry);
72221 spin_unlock(&dentry->d_lock);
72222 }
72223@@ -3641,6 +3770,8 @@ static long do_rmdir(int dfd, const char __user *pathname)
72224 struct filename *name;
72225 struct dentry *dentry;
72226 struct nameidata nd;
72227+ u64 saved_ino = 0;
72228+ dev_t saved_dev = 0;
72229 unsigned int lookup_flags = 0;
72230 retry:
72231 name = user_path_parent(dfd, pathname, &nd, lookup_flags);
72232@@ -3673,10 +3804,21 @@ retry:
72233 error = -ENOENT;
72234 goto exit3;
72235 }
72236+
72237+ saved_ino = gr_get_ino_from_dentry(dentry);
72238+ saved_dev = gr_get_dev_from_dentry(dentry);
72239+
72240+ if (!gr_acl_handle_rmdir(dentry, nd.path.mnt)) {
72241+ error = -EACCES;
72242+ goto exit3;
72243+ }
72244+
72245 error = security_path_rmdir(&nd.path, dentry);
72246 if (error)
72247 goto exit3;
72248 error = vfs_rmdir(nd.path.dentry->d_inode, dentry);
72249+ if (!error && (saved_dev || saved_ino))
72250+ gr_handle_delete(saved_ino, saved_dev);
72251 exit3:
72252 dput(dentry);
72253 exit2:
72254@@ -3769,6 +3911,8 @@ static long do_unlinkat(int dfd, const char __user *pathname)
72255 struct nameidata nd;
72256 struct inode *inode = NULL;
72257 struct inode *delegated_inode = NULL;
72258+ u64 saved_ino = 0;
72259+ dev_t saved_dev = 0;
72260 unsigned int lookup_flags = 0;
72261 retry:
72262 name = user_path_parent(dfd, pathname, &nd, lookup_flags);
72263@@ -3795,10 +3939,22 @@ retry_deleg:
72264 if (d_is_negative(dentry))
72265 goto slashes;
72266 ihold(inode);
72267+
72268+ if (inode->i_nlink <= 1) {
72269+ saved_ino = gr_get_ino_from_dentry(dentry);
72270+ saved_dev = gr_get_dev_from_dentry(dentry);
72271+ }
72272+ if (!gr_acl_handle_unlink(dentry, nd.path.mnt)) {
72273+ error = -EACCES;
72274+ goto exit2;
72275+ }
72276+
72277 error = security_path_unlink(&nd.path, dentry);
72278 if (error)
72279 goto exit2;
72280 error = vfs_unlink(nd.path.dentry->d_inode, dentry, &delegated_inode);
72281+ if (!error && (saved_ino || saved_dev))
72282+ gr_handle_delete(saved_ino, saved_dev);
72283 exit2:
72284 dput(dentry);
72285 }
72286@@ -3887,9 +4043,17 @@ retry:
72287 if (IS_ERR(dentry))
72288 goto out_putname;
72289
72290+ if (!gr_acl_handle_symlink(dentry, path.dentry, path.mnt, from)) {
72291+ error = -EACCES;
72292+ goto out;
72293+ }
72294+
72295 error = security_path_symlink(&path, dentry, from->name);
72296 if (!error)
72297 error = vfs_symlink(path.dentry->d_inode, dentry, from->name);
72298+ if (!error)
72299+ gr_handle_create(dentry, path.mnt);
72300+out:
72301 done_path_create(&path, dentry);
72302 if (retry_estale(error, lookup_flags)) {
72303 lookup_flags |= LOOKUP_REVAL;
72304@@ -3993,6 +4157,7 @@ SYSCALL_DEFINE5(linkat, int, olddfd, const char __user *, oldname,
72305 struct dentry *new_dentry;
72306 struct path old_path, new_path;
72307 struct inode *delegated_inode = NULL;
72308+ struct filename *to = NULL;
72309 int how = 0;
72310 int error;
72311
72312@@ -4016,7 +4181,7 @@ retry:
72313 if (error)
72314 return error;
72315
72316- new_dentry = user_path_create(newdfd, newname, &new_path,
72317+ new_dentry = user_path_create_with_name(newdfd, newname, &new_path, &to,
72318 (how & LOOKUP_REVAL));
72319 error = PTR_ERR(new_dentry);
72320 if (IS_ERR(new_dentry))
72321@@ -4028,11 +4193,28 @@ retry:
72322 error = may_linkat(&old_path);
72323 if (unlikely(error))
72324 goto out_dput;
72325+
72326+ if (gr_handle_hardlink(old_path.dentry, old_path.mnt,
72327+ old_path.dentry->d_inode,
72328+ old_path.dentry->d_inode->i_mode, to)) {
72329+ error = -EACCES;
72330+ goto out_dput;
72331+ }
72332+
72333+ if (!gr_acl_handle_link(new_dentry, new_path.dentry, new_path.mnt,
72334+ old_path.dentry, old_path.mnt, to)) {
72335+ error = -EACCES;
72336+ goto out_dput;
72337+ }
72338+
72339 error = security_path_link(old_path.dentry, &new_path, new_dentry);
72340 if (error)
72341 goto out_dput;
72342 error = vfs_link(old_path.dentry, new_path.dentry->d_inode, new_dentry, &delegated_inode);
72343+ if (!error)
72344+ gr_handle_create(new_dentry, new_path.mnt);
72345 out_dput:
72346+ putname(to);
72347 done_path_create(&new_path, new_dentry);
72348 if (delegated_inode) {
72349 error = break_deleg_wait(&delegated_inode);
72350@@ -4348,6 +4530,20 @@ retry_deleg:
72351 if (new_dentry == trap)
72352 goto exit5;
72353
72354+ if (gr_bad_chroot_rename(old_dentry, oldnd.path.mnt, new_dentry, newnd.path.mnt)) {
72355+ /* use EXDEV error to cause 'mv' to switch to an alternative
72356+ * method for usability
72357+ */
72358+ error = -EXDEV;
72359+ goto exit5;
72360+ }
72361+
72362+ error = gr_acl_handle_rename(new_dentry, new_dir, newnd.path.mnt,
72363+ old_dentry, old_dir->d_inode, oldnd.path.mnt,
72364+ to, flags);
72365+ if (error)
72366+ goto exit5;
72367+
72368 error = security_path_rename(&oldnd.path, old_dentry,
72369 &newnd.path, new_dentry, flags);
72370 if (error)
72371@@ -4355,6 +4551,9 @@ retry_deleg:
72372 error = vfs_rename(old_dir->d_inode, old_dentry,
72373 new_dir->d_inode, new_dentry,
72374 &delegated_inode, flags);
72375+ if (!error)
72376+ gr_handle_rename(old_dir->d_inode, new_dir->d_inode, old_dentry,
72377+ new_dentry, oldnd.path.mnt, new_dentry->d_inode ? 1 : 0, flags);
72378 exit5:
72379 dput(new_dentry);
72380 exit4:
72381@@ -4411,14 +4610,24 @@ EXPORT_SYMBOL(vfs_whiteout);
72382
72383 int readlink_copy(char __user *buffer, int buflen, const char *link)
72384 {
72385+ char tmpbuf[64];
72386+ const char *newlink;
72387 int len = PTR_ERR(link);
72388+
72389 if (IS_ERR(link))
72390 goto out;
72391
72392 len = strlen(link);
72393 if (len > (unsigned) buflen)
72394 len = buflen;
72395- if (copy_to_user(buffer, link, len))
72396+
72397+ if (len < sizeof(tmpbuf)) {
72398+ memcpy(tmpbuf, link, len);
72399+ newlink = tmpbuf;
72400+ } else
72401+ newlink = link;
72402+
72403+ if (copy_to_user(buffer, newlink, len))
72404 len = -EFAULT;
72405 out:
72406 return len;
72407diff --git a/fs/namespace.c b/fs/namespace.c
72408index 38ed1e1..8500e56 100644
72409--- a/fs/namespace.c
72410+++ b/fs/namespace.c
72411@@ -1480,6 +1480,9 @@ static int do_umount(struct mount *mnt, int flags)
72412 if (!(sb->s_flags & MS_RDONLY))
72413 retval = do_remount_sb(sb, MS_RDONLY, NULL, 0);
72414 up_write(&sb->s_umount);
72415+
72416+ gr_log_remount(mnt->mnt_devname, retval);
72417+
72418 return retval;
72419 }
72420
72421@@ -1502,6 +1505,9 @@ static int do_umount(struct mount *mnt, int flags)
72422 }
72423 unlock_mount_hash();
72424 namespace_unlock();
72425+
72426+ gr_log_unmount(mnt->mnt_devname, retval);
72427+
72428 return retval;
72429 }
72430
72431@@ -1559,7 +1565,7 @@ static inline bool may_mount(void)
72432 * unixes. Our API is identical to OSF/1 to avoid making a mess of AMD
72433 */
72434
72435-SYSCALL_DEFINE2(umount, char __user *, name, int, flags)
72436+SYSCALL_DEFINE2(umount, const char __user *, name, int, flags)
72437 {
72438 struct path path;
72439 struct mount *mnt;
72440@@ -1604,7 +1610,7 @@ out:
72441 /*
72442 * The 2.0 compatible umount. No flags.
72443 */
72444-SYSCALL_DEFINE1(oldumount, char __user *, name)
72445+SYSCALL_DEFINE1(oldumount, const char __user *, name)
72446 {
72447 return sys_umount(name, 0);
72448 }
72449@@ -2670,6 +2676,16 @@ long do_mount(const char *dev_name, const char __user *dir_name,
72450 MS_NOATIME | MS_NODIRATIME | MS_RELATIME| MS_KERNMOUNT |
72451 MS_STRICTATIME);
72452
72453+ if (gr_handle_rofs_mount(path.dentry, path.mnt, mnt_flags)) {
72454+ retval = -EPERM;
72455+ goto dput_out;
72456+ }
72457+
72458+ if (gr_handle_chroot_mount(path.dentry, path.mnt, dev_name)) {
72459+ retval = -EPERM;
72460+ goto dput_out;
72461+ }
72462+
72463 if (flags & MS_REMOUNT)
72464 retval = do_remount(&path, flags & ~MS_REMOUNT, mnt_flags,
72465 data_page);
72466@@ -2683,7 +2699,10 @@ long do_mount(const char *dev_name, const char __user *dir_name,
72467 retval = do_new_mount(&path, type_page, flags, mnt_flags,
72468 dev_name, data_page);
72469 dput_out:
72470+ gr_log_mount(dev_name, &path, retval);
72471+
72472 path_put(&path);
72473+
72474 return retval;
72475 }
72476
72477@@ -2701,7 +2720,7 @@ static void free_mnt_ns(struct mnt_namespace *ns)
72478 * number incrementing at 10Ghz will take 12,427 years to wrap which
72479 * is effectively never, so we can ignore the possibility.
72480 */
72481-static atomic64_t mnt_ns_seq = ATOMIC64_INIT(1);
72482+static atomic64_unchecked_t mnt_ns_seq = ATOMIC64_INIT(1);
72483
72484 static struct mnt_namespace *alloc_mnt_ns(struct user_namespace *user_ns)
72485 {
72486@@ -2717,7 +2736,7 @@ static struct mnt_namespace *alloc_mnt_ns(struct user_namespace *user_ns)
72487 return ERR_PTR(ret);
72488 }
72489 new_ns->ns.ops = &mntns_operations;
72490- new_ns->seq = atomic64_add_return(1, &mnt_ns_seq);
72491+ new_ns->seq = atomic64_add_return_unchecked(1, &mnt_ns_seq);
72492 atomic_set(&new_ns->count, 1);
72493 new_ns->root = NULL;
72494 INIT_LIST_HEAD(&new_ns->list);
72495@@ -2727,7 +2746,7 @@ static struct mnt_namespace *alloc_mnt_ns(struct user_namespace *user_ns)
72496 return new_ns;
72497 }
72498
72499-struct mnt_namespace *copy_mnt_ns(unsigned long flags, struct mnt_namespace *ns,
72500+__latent_entropy struct mnt_namespace *copy_mnt_ns(unsigned long flags, struct mnt_namespace *ns,
72501 struct user_namespace *user_ns, struct fs_struct *new_fs)
72502 {
72503 struct mnt_namespace *new_ns;
72504@@ -2848,8 +2867,8 @@ struct dentry *mount_subtree(struct vfsmount *mnt, const char *name)
72505 }
72506 EXPORT_SYMBOL(mount_subtree);
72507
72508-SYSCALL_DEFINE5(mount, char __user *, dev_name, char __user *, dir_name,
72509- char __user *, type, unsigned long, flags, void __user *, data)
72510+SYSCALL_DEFINE5(mount, const char __user *, dev_name, const char __user *, dir_name,
72511+ const char __user *, type, unsigned long, flags, void __user *, data)
72512 {
72513 int ret;
72514 char *kernel_type;
72515@@ -2955,6 +2974,11 @@ SYSCALL_DEFINE2(pivot_root, const char __user *, new_root,
72516 if (error)
72517 goto out2;
72518
72519+ if (gr_handle_chroot_pivot()) {
72520+ error = -EPERM;
72521+ goto out2;
72522+ }
72523+
72524 get_fs_root(current->fs, &root);
72525 old_mp = lock_mount(&old);
72526 error = PTR_ERR(old_mp);
72527@@ -3235,7 +3259,7 @@ static int mntns_install(struct nsproxy *nsproxy, struct ns_common *ns)
72528 !ns_capable(current_user_ns(), CAP_SYS_ADMIN))
72529 return -EPERM;
72530
72531- if (fs->users != 1)
72532+ if (atomic_read(&fs->users) != 1)
72533 return -EINVAL;
72534
72535 get_mnt_ns(mnt_ns);
72536diff --git a/fs/nfs/callback_xdr.c b/fs/nfs/callback_xdr.c
72537index 19ca95c..b28702c 100644
72538--- a/fs/nfs/callback_xdr.c
72539+++ b/fs/nfs/callback_xdr.c
72540@@ -51,7 +51,7 @@ struct callback_op {
72541 callback_decode_arg_t decode_args;
72542 callback_encode_res_t encode_res;
72543 long res_maxsize;
72544-};
72545+} __do_const;
72546
72547 static struct callback_op callback_ops[];
72548
72549diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
72550index d42dff6..ecbdf42 100644
72551--- a/fs/nfs/inode.c
72552+++ b/fs/nfs/inode.c
72553@@ -1270,16 +1270,16 @@ static int nfs_ctime_need_update(const struct inode *inode, const struct nfs_fat
72554 return timespec_compare(&fattr->ctime, &inode->i_ctime) > 0;
72555 }
72556
72557-static atomic_long_t nfs_attr_generation_counter;
72558+static atomic_long_unchecked_t nfs_attr_generation_counter;
72559
72560 static unsigned long nfs_read_attr_generation_counter(void)
72561 {
72562- return atomic_long_read(&nfs_attr_generation_counter);
72563+ return atomic_long_read_unchecked(&nfs_attr_generation_counter);
72564 }
72565
72566 unsigned long nfs_inc_attr_generation_counter(void)
72567 {
72568- return atomic_long_inc_return(&nfs_attr_generation_counter);
72569+ return atomic_long_inc_return_unchecked(&nfs_attr_generation_counter);
72570 }
72571 EXPORT_SYMBOL_GPL(nfs_inc_attr_generation_counter);
72572
72573diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c
72574index 5416968..0942042 100644
72575--- a/fs/nfsd/nfs4proc.c
72576+++ b/fs/nfsd/nfs4proc.c
72577@@ -1496,7 +1496,7 @@ struct nfsd4_operation {
72578 nfsd4op_rsize op_rsize_bop;
72579 stateid_getter op_get_currentstateid;
72580 stateid_setter op_set_currentstateid;
72581-};
72582+} __do_const;
72583
72584 static struct nfsd4_operation nfsd4_ops[];
72585
72586diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
72587index 5b33ce1..c2a92aa 100644
72588--- a/fs/nfsd/nfs4xdr.c
72589+++ b/fs/nfsd/nfs4xdr.c
72590@@ -1703,7 +1703,7 @@ nfsd4_decode_notsupp(struct nfsd4_compoundargs *argp, void *p)
72591
72592 typedef __be32(*nfsd4_dec)(struct nfsd4_compoundargs *argp, void *);
72593
72594-static nfsd4_dec nfsd4_dec_ops[] = {
72595+static const nfsd4_dec nfsd4_dec_ops[] = {
72596 [OP_ACCESS] = (nfsd4_dec)nfsd4_decode_access,
72597 [OP_CLOSE] = (nfsd4_dec)nfsd4_decode_close,
72598 [OP_COMMIT] = (nfsd4_dec)nfsd4_decode_commit,
72599diff --git a/fs/nfsd/nfscache.c b/fs/nfsd/nfscache.c
72600index 46ec934..f384e41 100644
72601--- a/fs/nfsd/nfscache.c
72602+++ b/fs/nfsd/nfscache.c
72603@@ -541,7 +541,7 @@ nfsd_cache_update(struct svc_rqst *rqstp, int cachetype, __be32 *statp)
72604 struct kvec *resv = &rqstp->rq_res.head[0], *cachv;
72605 u32 hash;
72606 struct nfsd_drc_bucket *b;
72607- int len;
72608+ long len;
72609 size_t bufsize = 0;
72610
72611 if (!rp)
72612@@ -550,11 +550,14 @@ nfsd_cache_update(struct svc_rqst *rqstp, int cachetype, __be32 *statp)
72613 hash = nfsd_cache_hash(rp->c_xid);
72614 b = &drc_hashtbl[hash];
72615
72616- len = resv->iov_len - ((char*)statp - (char*)resv->iov_base);
72617- len >>= 2;
72618+ if (statp) {
72619+ len = (char*)statp - (char*)resv->iov_base;
72620+ len = resv->iov_len - len;
72621+ len >>= 2;
72622+ }
72623
72624 /* Don't cache excessive amounts of data and XDR failures */
72625- if (!statp || len > (256 >> 2)) {
72626+ if (!statp || len > (256 >> 2) || len < 0) {
72627 nfsd_reply_cache_free(b, rp);
72628 return;
72629 }
72630@@ -562,7 +565,7 @@ nfsd_cache_update(struct svc_rqst *rqstp, int cachetype, __be32 *statp)
72631 switch (cachetype) {
72632 case RC_REPLSTAT:
72633 if (len != 1)
72634- printk("nfsd: RC_REPLSTAT/reply len %d!\n",len);
72635+ printk("nfsd: RC_REPLSTAT/reply len %ld!\n",len);
72636 rp->c_replstat = *statp;
72637 break;
72638 case RC_REPLBUFF:
72639diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
72640index 3685265..e77261e 100644
72641--- a/fs/nfsd/vfs.c
72642+++ b/fs/nfsd/vfs.c
72643@@ -893,7 +893,7 @@ __be32 nfsd_readv(struct file *file, loff_t offset, struct kvec *vec, int vlen,
72644
72645 oldfs = get_fs();
72646 set_fs(KERNEL_DS);
72647- host_err = vfs_readv(file, (struct iovec __user *)vec, vlen, &offset);
72648+ host_err = vfs_readv(file, (struct iovec __force_user *)vec, vlen, &offset);
72649 set_fs(oldfs);
72650 return nfsd_finish_read(file, count, host_err);
72651 }
72652@@ -980,7 +980,7 @@ nfsd_vfs_write(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
72653
72654 /* Write the data. */
72655 oldfs = get_fs(); set_fs(KERNEL_DS);
72656- host_err = vfs_writev(file, (struct iovec __user *)vec, vlen, &pos);
72657+ host_err = vfs_writev(file, (struct iovec __force_user *)vec, vlen, &pos);
72658 set_fs(oldfs);
72659 if (host_err < 0)
72660 goto out_nfserr;
72661@@ -1525,7 +1525,7 @@ nfsd_readlink(struct svc_rqst *rqstp, struct svc_fh *fhp, char *buf, int *lenp)
72662 */
72663
72664 oldfs = get_fs(); set_fs(KERNEL_DS);
72665- host_err = inode->i_op->readlink(path.dentry, (char __user *)buf, *lenp);
72666+ host_err = inode->i_op->readlink(path.dentry, (char __force_user *)buf, *lenp);
72667 set_fs(oldfs);
72668
72669 if (host_err < 0)
72670diff --git a/fs/nls/nls_base.c b/fs/nls/nls_base.c
72671index 52ccd34..7a6b202 100644
72672--- a/fs/nls/nls_base.c
72673+++ b/fs/nls/nls_base.c
72674@@ -234,21 +234,25 @@ EXPORT_SYMBOL(utf16s_to_utf8s);
72675
72676 int __register_nls(struct nls_table *nls, struct module *owner)
72677 {
72678- struct nls_table ** tmp = &tables;
72679+ struct nls_table *tmp = tables;
72680
72681 if (nls->next)
72682 return -EBUSY;
72683
72684- nls->owner = owner;
72685+ pax_open_kernel();
72686+ *(void **)&nls->owner = owner;
72687+ pax_close_kernel();
72688 spin_lock(&nls_lock);
72689- while (*tmp) {
72690- if (nls == *tmp) {
72691+ while (tmp) {
72692+ if (nls == tmp) {
72693 spin_unlock(&nls_lock);
72694 return -EBUSY;
72695 }
72696- tmp = &(*tmp)->next;
72697+ tmp = tmp->next;
72698 }
72699- nls->next = tables;
72700+ pax_open_kernel();
72701+ *(struct nls_table **)&nls->next = tables;
72702+ pax_close_kernel();
72703 tables = nls;
72704 spin_unlock(&nls_lock);
72705 return 0;
72706@@ -257,12 +261,14 @@ EXPORT_SYMBOL(__register_nls);
72707
72708 int unregister_nls(struct nls_table * nls)
72709 {
72710- struct nls_table ** tmp = &tables;
72711+ struct nls_table * const * tmp = &tables;
72712
72713 spin_lock(&nls_lock);
72714 while (*tmp) {
72715 if (nls == *tmp) {
72716- *tmp = nls->next;
72717+ pax_open_kernel();
72718+ *(struct nls_table **)tmp = nls->next;
72719+ pax_close_kernel();
72720 spin_unlock(&nls_lock);
72721 return 0;
72722 }
72723@@ -272,7 +278,7 @@ int unregister_nls(struct nls_table * nls)
72724 return -EINVAL;
72725 }
72726
72727-static struct nls_table *find_nls(char *charset)
72728+static struct nls_table *find_nls(const char *charset)
72729 {
72730 struct nls_table *nls;
72731 spin_lock(&nls_lock);
72732@@ -288,7 +294,7 @@ static struct nls_table *find_nls(char *charset)
72733 return nls;
72734 }
72735
72736-struct nls_table *load_nls(char *charset)
72737+struct nls_table *load_nls(const char *charset)
72738 {
72739 return try_then_request_module(find_nls(charset), "nls_%s", charset);
72740 }
72741diff --git a/fs/nls/nls_euc-jp.c b/fs/nls/nls_euc-jp.c
72742index 162b3f1..6076a7c 100644
72743--- a/fs/nls/nls_euc-jp.c
72744+++ b/fs/nls/nls_euc-jp.c
72745@@ -560,8 +560,10 @@ static int __init init_nls_euc_jp(void)
72746 p_nls = load_nls("cp932");
72747
72748 if (p_nls) {
72749- table.charset2upper = p_nls->charset2upper;
72750- table.charset2lower = p_nls->charset2lower;
72751+ pax_open_kernel();
72752+ *(const unsigned char **)&table.charset2upper = p_nls->charset2upper;
72753+ *(const unsigned char **)&table.charset2lower = p_nls->charset2lower;
72754+ pax_close_kernel();
72755 return register_nls(&table);
72756 }
72757
72758diff --git a/fs/nls/nls_koi8-ru.c b/fs/nls/nls_koi8-ru.c
72759index a80a741..7b96e1b 100644
72760--- a/fs/nls/nls_koi8-ru.c
72761+++ b/fs/nls/nls_koi8-ru.c
72762@@ -62,8 +62,10 @@ static int __init init_nls_koi8_ru(void)
72763 p_nls = load_nls("koi8-u");
72764
72765 if (p_nls) {
72766- table.charset2upper = p_nls->charset2upper;
72767- table.charset2lower = p_nls->charset2lower;
72768+ pax_open_kernel();
72769+ *(const unsigned char **)&table.charset2upper = p_nls->charset2upper;
72770+ *(const unsigned char **)&table.charset2lower = p_nls->charset2lower;
72771+ pax_close_kernel();
72772 return register_nls(&table);
72773 }
72774
72775diff --git a/fs/notify/fanotify/fanotify_user.c b/fs/notify/fanotify/fanotify_user.c
72776index cf27550..6c70f29d 100644
72777--- a/fs/notify/fanotify/fanotify_user.c
72778+++ b/fs/notify/fanotify/fanotify_user.c
72779@@ -216,8 +216,8 @@ static ssize_t copy_event_to_user(struct fsnotify_group *group,
72780
72781 fd = fanotify_event_metadata.fd;
72782 ret = -EFAULT;
72783- if (copy_to_user(buf, &fanotify_event_metadata,
72784- fanotify_event_metadata.event_len))
72785+ if (fanotify_event_metadata.event_len > sizeof fanotify_event_metadata ||
72786+ copy_to_user(buf, &fanotify_event_metadata, fanotify_event_metadata.event_len))
72787 goto out_close_fd;
72788
72789 #ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
72790diff --git a/fs/notify/notification.c b/fs/notify/notification.c
72791index a95d8e0..a91a5fd 100644
72792--- a/fs/notify/notification.c
72793+++ b/fs/notify/notification.c
72794@@ -48,7 +48,7 @@
72795 #include <linux/fsnotify_backend.h>
72796 #include "fsnotify.h"
72797
72798-static atomic_t fsnotify_sync_cookie = ATOMIC_INIT(0);
72799+static atomic_unchecked_t fsnotify_sync_cookie = ATOMIC_INIT(0);
72800
72801 /**
72802 * fsnotify_get_cookie - return a unique cookie for use in synchronizing events.
72803@@ -56,7 +56,7 @@ static atomic_t fsnotify_sync_cookie = ATOMIC_INIT(0);
72804 */
72805 u32 fsnotify_get_cookie(void)
72806 {
72807- return atomic_inc_return(&fsnotify_sync_cookie);
72808+ return atomic_inc_return_unchecked(&fsnotify_sync_cookie);
72809 }
72810 EXPORT_SYMBOL_GPL(fsnotify_get_cookie);
72811
72812diff --git a/fs/ntfs/dir.c b/fs/ntfs/dir.c
72813index 9e38daf..5727cae 100644
72814--- a/fs/ntfs/dir.c
72815+++ b/fs/ntfs/dir.c
72816@@ -1310,7 +1310,7 @@ find_next_index_buffer:
72817 ia = (INDEX_ALLOCATION*)(kaddr + (ia_pos & ~PAGE_CACHE_MASK &
72818 ~(s64)(ndir->itype.index.block_size - 1)));
72819 /* Bounds checks. */
72820- if (unlikely((u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
72821+ if (unlikely(!kaddr || (u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
72822 ntfs_error(sb, "Out of bounds check failed. Corrupt directory "
72823 "inode 0x%lx or driver bug.", vdir->i_ino);
72824 goto err_out;
72825diff --git a/fs/ntfs/file.c b/fs/ntfs/file.c
72826index 1da9b2d..9cca092a 100644
72827--- a/fs/ntfs/file.c
72828+++ b/fs/ntfs/file.c
72829@@ -1281,7 +1281,7 @@ static inline size_t ntfs_copy_from_user(struct page **pages,
72830 char *addr;
72831 size_t total = 0;
72832 unsigned len;
72833- int left;
72834+ unsigned left;
72835
72836 do {
72837 len = PAGE_CACHE_SIZE - ofs;
72838diff --git a/fs/ntfs/super.c b/fs/ntfs/super.c
72839index 9e1e112..241a52a 100644
72840--- a/fs/ntfs/super.c
72841+++ b/fs/ntfs/super.c
72842@@ -688,7 +688,7 @@ static struct buffer_head *read_ntfs_boot_sector(struct super_block *sb,
72843 if (!silent)
72844 ntfs_error(sb, "Primary boot sector is invalid.");
72845 } else if (!silent)
72846- ntfs_error(sb, read_err_str, "primary");
72847+ ntfs_error(sb, read_err_str, "%s", "primary");
72848 if (!(NTFS_SB(sb)->on_errors & ON_ERRORS_RECOVER)) {
72849 if (bh_primary)
72850 brelse(bh_primary);
72851@@ -704,7 +704,7 @@ static struct buffer_head *read_ntfs_boot_sector(struct super_block *sb,
72852 goto hotfix_primary_boot_sector;
72853 brelse(bh_backup);
72854 } else if (!silent)
72855- ntfs_error(sb, read_err_str, "backup");
72856+ ntfs_error(sb, read_err_str, "%s", "backup");
72857 /* Try to read NT3.51- backup boot sector. */
72858 if ((bh_backup = sb_bread(sb, nr_blocks >> 1))) {
72859 if (is_boot_sector_ntfs(sb, (NTFS_BOOT_SECTOR*)
72860@@ -715,7 +715,7 @@ static struct buffer_head *read_ntfs_boot_sector(struct super_block *sb,
72861 "sector.");
72862 brelse(bh_backup);
72863 } else if (!silent)
72864- ntfs_error(sb, read_err_str, "backup");
72865+ ntfs_error(sb, read_err_str, "%s", "backup");
72866 /* We failed. Cleanup and return. */
72867 if (bh_primary)
72868 brelse(bh_primary);
72869diff --git a/fs/ocfs2/localalloc.c b/fs/ocfs2/localalloc.c
72870index 0440134..d52c93a 100644
72871--- a/fs/ocfs2/localalloc.c
72872+++ b/fs/ocfs2/localalloc.c
72873@@ -1320,7 +1320,7 @@ static int ocfs2_local_alloc_slide_window(struct ocfs2_super *osb,
72874 goto bail;
72875 }
72876
72877- atomic_inc(&osb->alloc_stats.moves);
72878+ atomic_inc_unchecked(&osb->alloc_stats.moves);
72879
72880 bail:
72881 if (handle)
72882diff --git a/fs/ocfs2/ocfs2.h b/fs/ocfs2/ocfs2.h
72883index 460c6c3..b4ef513 100644
72884--- a/fs/ocfs2/ocfs2.h
72885+++ b/fs/ocfs2/ocfs2.h
72886@@ -247,11 +247,11 @@ enum ocfs2_vol_state
72887
72888 struct ocfs2_alloc_stats
72889 {
72890- atomic_t moves;
72891- atomic_t local_data;
72892- atomic_t bitmap_data;
72893- atomic_t bg_allocs;
72894- atomic_t bg_extends;
72895+ atomic_unchecked_t moves;
72896+ atomic_unchecked_t local_data;
72897+ atomic_unchecked_t bitmap_data;
72898+ atomic_unchecked_t bg_allocs;
72899+ atomic_unchecked_t bg_extends;
72900 };
72901
72902 enum ocfs2_local_alloc_state
72903diff --git a/fs/ocfs2/refcounttree.c b/fs/ocfs2/refcounttree.c
72904index ee541f9..df3a500 100644
72905--- a/fs/ocfs2/refcounttree.c
72906+++ b/fs/ocfs2/refcounttree.c
72907@@ -4276,7 +4276,7 @@ static int ocfs2_reflink(struct dentry *old_dentry, struct inode *dir,
72908 error = posix_acl_create(dir, &mode, &default_acl, &acl);
72909 if (error) {
72910 mlog_errno(error);
72911- goto out;
72912+ return error;
72913 }
72914
72915 error = ocfs2_create_inode_in_orphan(dir, mode,
72916diff --git a/fs/ocfs2/suballoc.c b/fs/ocfs2/suballoc.c
72917index 0cb889a..6a26b24 100644
72918--- a/fs/ocfs2/suballoc.c
72919+++ b/fs/ocfs2/suballoc.c
72920@@ -867,7 +867,7 @@ static int ocfs2_reserve_suballoc_bits(struct ocfs2_super *osb,
72921 mlog_errno(status);
72922 goto bail;
72923 }
72924- atomic_inc(&osb->alloc_stats.bg_extends);
72925+ atomic_inc_unchecked(&osb->alloc_stats.bg_extends);
72926
72927 /* You should never ask for this much metadata */
72928 BUG_ON(bits_wanted >
72929@@ -2014,7 +2014,7 @@ int ocfs2_claim_metadata(handle_t *handle,
72930 mlog_errno(status);
72931 goto bail;
72932 }
72933- atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
72934+ atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
72935
72936 *suballoc_loc = res.sr_bg_blkno;
72937 *suballoc_bit_start = res.sr_bit_offset;
72938@@ -2180,7 +2180,7 @@ int ocfs2_claim_new_inode_at_loc(handle_t *handle,
72939 trace_ocfs2_claim_new_inode_at_loc((unsigned long long)di_blkno,
72940 res->sr_bits);
72941
72942- atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
72943+ atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
72944
72945 BUG_ON(res->sr_bits != 1);
72946
72947@@ -2222,7 +2222,7 @@ int ocfs2_claim_new_inode(handle_t *handle,
72948 mlog_errno(status);
72949 goto bail;
72950 }
72951- atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
72952+ atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
72953
72954 BUG_ON(res.sr_bits != 1);
72955
72956@@ -2326,7 +2326,7 @@ int __ocfs2_claim_clusters(handle_t *handle,
72957 cluster_start,
72958 num_clusters);
72959 if (!status)
72960- atomic_inc(&osb->alloc_stats.local_data);
72961+ atomic_inc_unchecked(&osb->alloc_stats.local_data);
72962 } else {
72963 if (min_clusters > (osb->bitmap_cpg - 1)) {
72964 /* The only paths asking for contiguousness
72965@@ -2352,7 +2352,7 @@ int __ocfs2_claim_clusters(handle_t *handle,
72966 ocfs2_desc_bitmap_to_cluster_off(ac->ac_inode,
72967 res.sr_bg_blkno,
72968 res.sr_bit_offset);
72969- atomic_inc(&osb->alloc_stats.bitmap_data);
72970+ atomic_inc_unchecked(&osb->alloc_stats.bitmap_data);
72971 *num_clusters = res.sr_bits;
72972 }
72973 }
72974diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c
72975index 2667518..24bcf79 100644
72976--- a/fs/ocfs2/super.c
72977+++ b/fs/ocfs2/super.c
72978@@ -308,11 +308,11 @@ static int ocfs2_osb_dump(struct ocfs2_super *osb, char *buf, int len)
72979 "%10s => GlobalAllocs: %d LocalAllocs: %d "
72980 "SubAllocs: %d LAWinMoves: %d SAExtends: %d\n",
72981 "Stats",
72982- atomic_read(&osb->alloc_stats.bitmap_data),
72983- atomic_read(&osb->alloc_stats.local_data),
72984- atomic_read(&osb->alloc_stats.bg_allocs),
72985- atomic_read(&osb->alloc_stats.moves),
72986- atomic_read(&osb->alloc_stats.bg_extends));
72987+ atomic_read_unchecked(&osb->alloc_stats.bitmap_data),
72988+ atomic_read_unchecked(&osb->alloc_stats.local_data),
72989+ atomic_read_unchecked(&osb->alloc_stats.bg_allocs),
72990+ atomic_read_unchecked(&osb->alloc_stats.moves),
72991+ atomic_read_unchecked(&osb->alloc_stats.bg_extends));
72992
72993 out += snprintf(buf + out, len - out,
72994 "%10s => State: %u Descriptor: %llu Size: %u bits "
72995@@ -2093,11 +2093,11 @@ static int ocfs2_initialize_super(struct super_block *sb,
72996
72997 mutex_init(&osb->system_file_mutex);
72998
72999- atomic_set(&osb->alloc_stats.moves, 0);
73000- atomic_set(&osb->alloc_stats.local_data, 0);
73001- atomic_set(&osb->alloc_stats.bitmap_data, 0);
73002- atomic_set(&osb->alloc_stats.bg_allocs, 0);
73003- atomic_set(&osb->alloc_stats.bg_extends, 0);
73004+ atomic_set_unchecked(&osb->alloc_stats.moves, 0);
73005+ atomic_set_unchecked(&osb->alloc_stats.local_data, 0);
73006+ atomic_set_unchecked(&osb->alloc_stats.bitmap_data, 0);
73007+ atomic_set_unchecked(&osb->alloc_stats.bg_allocs, 0);
73008+ atomic_set_unchecked(&osb->alloc_stats.bg_extends, 0);
73009
73010 /* Copy the blockcheck stats from the superblock probe */
73011 osb->osb_ecc_stats = *stats;
73012diff --git a/fs/open.c b/fs/open.c
73013index 44a3be1..5e97aa1 100644
73014--- a/fs/open.c
73015+++ b/fs/open.c
73016@@ -32,6 +32,8 @@
73017 #include <linux/dnotify.h>
73018 #include <linux/compat.h>
73019
73020+#define CREATE_TRACE_POINTS
73021+#include <trace/events/fs.h>
73022 #include "internal.h"
73023
73024 int do_truncate(struct dentry *dentry, loff_t length, unsigned int time_attrs,
73025@@ -103,6 +105,8 @@ long vfs_truncate(struct path *path, loff_t length)
73026 error = locks_verify_truncate(inode, NULL, length);
73027 if (!error)
73028 error = security_path_truncate(path);
73029+ if (!error && !gr_acl_handle_truncate(path->dentry, path->mnt))
73030+ error = -EACCES;
73031 if (!error)
73032 error = do_truncate(path->dentry, length, 0, NULL);
73033
73034@@ -187,6 +191,8 @@ static long do_sys_ftruncate(unsigned int fd, loff_t length, int small)
73035 error = locks_verify_truncate(inode, f.file, length);
73036 if (!error)
73037 error = security_path_truncate(&f.file->f_path);
73038+ if (!error && !gr_acl_handle_truncate(f.file->f_path.dentry, f.file->f_path.mnt))
73039+ error = -EACCES;
73040 if (!error)
73041 error = do_truncate(dentry, length, ATTR_MTIME|ATTR_CTIME, f.file);
73042 sb_end_write(inode->i_sb);
73043@@ -392,6 +398,9 @@ retry:
73044 if (__mnt_is_readonly(path.mnt))
73045 res = -EROFS;
73046
73047+ if (!res && !gr_acl_handle_access(path.dentry, path.mnt, mode))
73048+ res = -EACCES;
73049+
73050 out_path_release:
73051 path_put(&path);
73052 if (retry_estale(res, lookup_flags)) {
73053@@ -423,6 +432,8 @@ retry:
73054 if (error)
73055 goto dput_and_out;
73056
73057+ gr_log_chdir(path.dentry, path.mnt);
73058+
73059 set_fs_pwd(current->fs, &path);
73060
73061 dput_and_out:
73062@@ -452,6 +463,13 @@ SYSCALL_DEFINE1(fchdir, unsigned int, fd)
73063 goto out_putf;
73064
73065 error = inode_permission(inode, MAY_EXEC | MAY_CHDIR);
73066+
73067+ if (!error && !gr_chroot_fchdir(f.file->f_path.dentry, f.file->f_path.mnt))
73068+ error = -EPERM;
73069+
73070+ if (!error)
73071+ gr_log_chdir(f.file->f_path.dentry, f.file->f_path.mnt);
73072+
73073 if (!error)
73074 set_fs_pwd(current->fs, &f.file->f_path);
73075 out_putf:
73076@@ -481,7 +499,13 @@ retry:
73077 if (error)
73078 goto dput_and_out;
73079
73080+ if (gr_handle_chroot_chroot(path.dentry, path.mnt))
73081+ goto dput_and_out;
73082+
73083 set_fs_root(current->fs, &path);
73084+
73085+ gr_handle_chroot_chdir(&path);
73086+
73087 error = 0;
73088 dput_and_out:
73089 path_put(&path);
73090@@ -505,6 +529,16 @@ static int chmod_common(struct path *path, umode_t mode)
73091 return error;
73092 retry_deleg:
73093 mutex_lock(&inode->i_mutex);
73094+
73095+ if (!gr_acl_handle_chmod(path->dentry, path->mnt, &mode)) {
73096+ error = -EACCES;
73097+ goto out_unlock;
73098+ }
73099+ if (gr_handle_chroot_chmod(path->dentry, path->mnt, mode)) {
73100+ error = -EACCES;
73101+ goto out_unlock;
73102+ }
73103+
73104 error = security_path_chmod(path, mode);
73105 if (error)
73106 goto out_unlock;
73107@@ -570,6 +604,9 @@ static int chown_common(struct path *path, uid_t user, gid_t group)
73108 uid = make_kuid(current_user_ns(), user);
73109 gid = make_kgid(current_user_ns(), group);
73110
73111+ if (!gr_acl_handle_chown(path->dentry, path->mnt))
73112+ return -EACCES;
73113+
73114 retry_deleg:
73115 newattrs.ia_valid = ATTR_CTIME;
73116 if (user != (uid_t) -1) {
73117@@ -1017,6 +1054,7 @@ long do_sys_open(int dfd, const char __user *filename, int flags, umode_t mode)
73118 } else {
73119 fsnotify_open(f);
73120 fd_install(fd, f);
73121+ trace_do_sys_open(tmp->name, flags, mode);
73122 }
73123 }
73124 putname(tmp);
73125diff --git a/fs/overlayfs/super.c b/fs/overlayfs/super.c
73126index 5f0d199..13b74b9 100644
73127--- a/fs/overlayfs/super.c
73128+++ b/fs/overlayfs/super.c
73129@@ -172,7 +172,7 @@ void ovl_path_lower(struct dentry *dentry, struct path *path)
73130 {
73131 struct ovl_entry *oe = dentry->d_fsdata;
73132
73133- *path = oe->numlower ? oe->lowerstack[0] : (struct path) { NULL, NULL };
73134+ *path = oe->numlower ? oe->lowerstack[0] : (struct path) { .dentry = NULL, .mnt = NULL };
73135 }
73136
73137 int ovl_want_write(struct dentry *dentry)
73138@@ -816,8 +816,8 @@ static unsigned int ovl_split_lowerdirs(char *str)
73139
73140 static int ovl_fill_super(struct super_block *sb, void *data, int silent)
73141 {
73142- struct path upperpath = { NULL, NULL };
73143- struct path workpath = { NULL, NULL };
73144+ struct path upperpath = { .dentry = NULL, .mnt = NULL };
73145+ struct path workpath = { .dentry = NULL, .mnt = NULL };
73146 struct dentry *root_dentry;
73147 struct ovl_entry *oe;
73148 struct ovl_fs *ufs;
73149diff --git a/fs/pipe.c b/fs/pipe.c
73150index 21981e5..2c0bffb 100644
73151--- a/fs/pipe.c
73152+++ b/fs/pipe.c
73153@@ -37,7 +37,7 @@ unsigned int pipe_max_size = 1048576;
73154 /*
73155 * Minimum pipe size, as required by POSIX
73156 */
73157-unsigned int pipe_min_size = PAGE_SIZE;
73158+unsigned int pipe_min_size __read_only = PAGE_SIZE;
73159
73160 /*
73161 * We use a start+len construction, which provides full use of the
73162@@ -56,7 +56,7 @@ unsigned int pipe_min_size = PAGE_SIZE;
73163
73164 static void pipe_lock_nested(struct pipe_inode_info *pipe, int subclass)
73165 {
73166- if (pipe->files)
73167+ if (atomic_read(&pipe->files))
73168 mutex_lock_nested(&pipe->mutex, subclass);
73169 }
73170
73171@@ -71,7 +71,7 @@ EXPORT_SYMBOL(pipe_lock);
73172
73173 void pipe_unlock(struct pipe_inode_info *pipe)
73174 {
73175- if (pipe->files)
73176+ if (atomic_read(&pipe->files))
73177 mutex_unlock(&pipe->mutex);
73178 }
73179 EXPORT_SYMBOL(pipe_unlock);
73180@@ -292,9 +292,9 @@ pipe_read(struct kiocb *iocb, struct iov_iter *to)
73181 }
73182 if (bufs) /* More to do? */
73183 continue;
73184- if (!pipe->writers)
73185+ if (!atomic_read(&pipe->writers))
73186 break;
73187- if (!pipe->waiting_writers) {
73188+ if (!atomic_read(&pipe->waiting_writers)) {
73189 /* syscall merging: Usually we must not sleep
73190 * if O_NONBLOCK is set, or if we got some data.
73191 * But if a writer sleeps in kernel space, then
73192@@ -351,7 +351,7 @@ pipe_write(struct kiocb *iocb, struct iov_iter *from)
73193
73194 __pipe_lock(pipe);
73195
73196- if (!pipe->readers) {
73197+ if (!atomic_read(&pipe->readers)) {
73198 send_sig(SIGPIPE, current, 0);
73199 ret = -EPIPE;
73200 goto out;
73201@@ -387,7 +387,7 @@ pipe_write(struct kiocb *iocb, struct iov_iter *from)
73202 for (;;) {
73203 int bufs;
73204
73205- if (!pipe->readers) {
73206+ if (!atomic_read(&pipe->readers)) {
73207 send_sig(SIGPIPE, current, 0);
73208 if (!ret)
73209 ret = -EPIPE;
73210@@ -455,9 +455,9 @@ pipe_write(struct kiocb *iocb, struct iov_iter *from)
73211 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
73212 do_wakeup = 0;
73213 }
73214- pipe->waiting_writers++;
73215+ atomic_inc(&pipe->waiting_writers);
73216 pipe_wait(pipe);
73217- pipe->waiting_writers--;
73218+ atomic_dec(&pipe->waiting_writers);
73219 }
73220 out:
73221 __pipe_unlock(pipe);
73222@@ -512,7 +512,7 @@ pipe_poll(struct file *filp, poll_table *wait)
73223 mask = 0;
73224 if (filp->f_mode & FMODE_READ) {
73225 mask = (nrbufs > 0) ? POLLIN | POLLRDNORM : 0;
73226- if (!pipe->writers && filp->f_version != pipe->w_counter)
73227+ if (!atomic_read(&pipe->writers) && filp->f_version != pipe->w_counter)
73228 mask |= POLLHUP;
73229 }
73230
73231@@ -522,7 +522,7 @@ pipe_poll(struct file *filp, poll_table *wait)
73232 * Most Unices do not set POLLERR for FIFOs but on Linux they
73233 * behave exactly like pipes for poll().
73234 */
73235- if (!pipe->readers)
73236+ if (!atomic_read(&pipe->readers))
73237 mask |= POLLERR;
73238 }
73239
73240@@ -534,7 +534,7 @@ static void put_pipe_info(struct inode *inode, struct pipe_inode_info *pipe)
73241 int kill = 0;
73242
73243 spin_lock(&inode->i_lock);
73244- if (!--pipe->files) {
73245+ if (atomic_dec_and_test(&pipe->files)) {
73246 inode->i_pipe = NULL;
73247 kill = 1;
73248 }
73249@@ -551,11 +551,11 @@ pipe_release(struct inode *inode, struct file *file)
73250
73251 __pipe_lock(pipe);
73252 if (file->f_mode & FMODE_READ)
73253- pipe->readers--;
73254+ atomic_dec(&pipe->readers);
73255 if (file->f_mode & FMODE_WRITE)
73256- pipe->writers--;
73257+ atomic_dec(&pipe->writers);
73258
73259- if (pipe->readers || pipe->writers) {
73260+ if (atomic_read(&pipe->readers) || atomic_read(&pipe->writers)) {
73261 wake_up_interruptible_sync_poll(&pipe->wait, POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM | POLLERR | POLLHUP);
73262 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
73263 kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
73264@@ -620,7 +620,7 @@ void free_pipe_info(struct pipe_inode_info *pipe)
73265 kfree(pipe);
73266 }
73267
73268-static struct vfsmount *pipe_mnt __read_mostly;
73269+struct vfsmount *pipe_mnt __read_mostly;
73270
73271 /*
73272 * pipefs_dname() is called from d_path().
73273@@ -650,8 +650,9 @@ static struct inode * get_pipe_inode(void)
73274 goto fail_iput;
73275
73276 inode->i_pipe = pipe;
73277- pipe->files = 2;
73278- pipe->readers = pipe->writers = 1;
73279+ atomic_set(&pipe->files, 2);
73280+ atomic_set(&pipe->readers, 1);
73281+ atomic_set(&pipe->writers, 1);
73282 inode->i_fop = &pipefifo_fops;
73283
73284 /*
73285@@ -830,17 +831,17 @@ static int fifo_open(struct inode *inode, struct file *filp)
73286 spin_lock(&inode->i_lock);
73287 if (inode->i_pipe) {
73288 pipe = inode->i_pipe;
73289- pipe->files++;
73290+ atomic_inc(&pipe->files);
73291 spin_unlock(&inode->i_lock);
73292 } else {
73293 spin_unlock(&inode->i_lock);
73294 pipe = alloc_pipe_info();
73295 if (!pipe)
73296 return -ENOMEM;
73297- pipe->files = 1;
73298+ atomic_set(&pipe->files, 1);
73299 spin_lock(&inode->i_lock);
73300 if (unlikely(inode->i_pipe)) {
73301- inode->i_pipe->files++;
73302+ atomic_inc(&inode->i_pipe->files);
73303 spin_unlock(&inode->i_lock);
73304 free_pipe_info(pipe);
73305 pipe = inode->i_pipe;
73306@@ -865,10 +866,10 @@ static int fifo_open(struct inode *inode, struct file *filp)
73307 * opened, even when there is no process writing the FIFO.
73308 */
73309 pipe->r_counter++;
73310- if (pipe->readers++ == 0)
73311+ if (atomic_inc_return(&pipe->readers) == 1)
73312 wake_up_partner(pipe);
73313
73314- if (!is_pipe && !pipe->writers) {
73315+ if (!is_pipe && !atomic_read(&pipe->writers)) {
73316 if ((filp->f_flags & O_NONBLOCK)) {
73317 /* suppress POLLHUP until we have
73318 * seen a writer */
73319@@ -887,14 +888,14 @@ static int fifo_open(struct inode *inode, struct file *filp)
73320 * errno=ENXIO when there is no process reading the FIFO.
73321 */
73322 ret = -ENXIO;
73323- if (!is_pipe && (filp->f_flags & O_NONBLOCK) && !pipe->readers)
73324+ if (!is_pipe && (filp->f_flags & O_NONBLOCK) && !atomic_read(&pipe->readers))
73325 goto err;
73326
73327 pipe->w_counter++;
73328- if (!pipe->writers++)
73329+ if (atomic_inc_return(&pipe->writers) == 1)
73330 wake_up_partner(pipe);
73331
73332- if (!is_pipe && !pipe->readers) {
73333+ if (!is_pipe && !atomic_read(&pipe->readers)) {
73334 if (wait_for_partner(pipe, &pipe->r_counter))
73335 goto err_wr;
73336 }
73337@@ -908,11 +909,11 @@ static int fifo_open(struct inode *inode, struct file *filp)
73338 * the process can at least talk to itself.
73339 */
73340
73341- pipe->readers++;
73342- pipe->writers++;
73343+ atomic_inc(&pipe->readers);
73344+ atomic_inc(&pipe->writers);
73345 pipe->r_counter++;
73346 pipe->w_counter++;
73347- if (pipe->readers == 1 || pipe->writers == 1)
73348+ if (atomic_read(&pipe->readers) == 1 || atomic_read(&pipe->writers) == 1)
73349 wake_up_partner(pipe);
73350 break;
73351
73352@@ -926,13 +927,13 @@ static int fifo_open(struct inode *inode, struct file *filp)
73353 return 0;
73354
73355 err_rd:
73356- if (!--pipe->readers)
73357+ if (atomic_dec_and_test(&pipe->readers))
73358 wake_up_interruptible(&pipe->wait);
73359 ret = -ERESTARTSYS;
73360 goto err;
73361
73362 err_wr:
73363- if (!--pipe->writers)
73364+ if (atomic_dec_and_test(&pipe->writers))
73365 wake_up_interruptible(&pipe->wait);
73366 ret = -ERESTARTSYS;
73367 goto err;
73368@@ -1010,7 +1011,7 @@ static long pipe_set_size(struct pipe_inode_info *pipe, unsigned long nr_pages)
73369 * Currently we rely on the pipe array holding a power-of-2 number
73370 * of pages.
73371 */
73372-static inline unsigned int round_pipe_size(unsigned int size)
73373+static inline unsigned long round_pipe_size(unsigned long size)
73374 {
73375 unsigned long nr_pages;
73376
73377@@ -1058,13 +1059,16 @@ long pipe_fcntl(struct file *file, unsigned int cmd, unsigned long arg)
73378
73379 switch (cmd) {
73380 case F_SETPIPE_SZ: {
73381- unsigned int size, nr_pages;
73382+ unsigned long size, nr_pages;
73383+
73384+ ret = -EINVAL;
73385+ if (arg < pipe_min_size)
73386+ goto out;
73387
73388 size = round_pipe_size(arg);
73389 nr_pages = size >> PAGE_SHIFT;
73390
73391- ret = -EINVAL;
73392- if (!nr_pages)
73393+ if (size < pipe_min_size)
73394 goto out;
73395
73396 if (!capable(CAP_SYS_RESOURCE) && size > pipe_max_size) {
73397diff --git a/fs/posix_acl.c b/fs/posix_acl.c
73398index 3a48bb7..403067b 100644
73399--- a/fs/posix_acl.c
73400+++ b/fs/posix_acl.c
73401@@ -20,6 +20,7 @@
73402 #include <linux/xattr.h>
73403 #include <linux/export.h>
73404 #include <linux/user_namespace.h>
73405+#include <linux/grsecurity.h>
73406
73407 struct posix_acl **acl_by_type(struct inode *inode, int type)
73408 {
73409@@ -277,7 +278,7 @@ posix_acl_equiv_mode(const struct posix_acl *acl, umode_t *mode_p)
73410 }
73411 }
73412 if (mode_p)
73413- *mode_p = (*mode_p & ~S_IRWXUGO) | mode;
73414+ *mode_p = ((*mode_p & ~S_IRWXUGO) | mode) & ~gr_acl_umask();
73415 return not_equiv;
73416 }
73417 EXPORT_SYMBOL(posix_acl_equiv_mode);
73418@@ -427,7 +428,7 @@ static int posix_acl_create_masq(struct posix_acl *acl, umode_t *mode_p)
73419 mode &= (group_obj->e_perm << 3) | ~S_IRWXG;
73420 }
73421
73422- *mode_p = (*mode_p & ~S_IRWXUGO) | mode;
73423+ *mode_p = ((*mode_p & ~S_IRWXUGO) | mode) & ~gr_acl_umask();
73424 return not_equiv;
73425 }
73426
73427@@ -485,6 +486,8 @@ __posix_acl_create(struct posix_acl **acl, gfp_t gfp, umode_t *mode_p)
73428 struct posix_acl *clone = posix_acl_clone(*acl, gfp);
73429 int err = -ENOMEM;
73430 if (clone) {
73431+ *mode_p &= ~gr_acl_umask();
73432+
73433 err = posix_acl_create_masq(clone, mode_p);
73434 if (err < 0) {
73435 posix_acl_release(clone);
73436@@ -663,11 +666,12 @@ struct posix_acl *
73437 posix_acl_from_xattr(struct user_namespace *user_ns,
73438 const void *value, size_t size)
73439 {
73440- posix_acl_xattr_header *header = (posix_acl_xattr_header *)value;
73441- posix_acl_xattr_entry *entry = (posix_acl_xattr_entry *)(header+1), *end;
73442+ const posix_acl_xattr_header *header = (const posix_acl_xattr_header *)value;
73443+ const posix_acl_xattr_entry *entry = (const posix_acl_xattr_entry *)(header+1), *end;
73444 int count;
73445 struct posix_acl *acl;
73446 struct posix_acl_entry *acl_e;
73447+ umode_t umask = gr_acl_umask();
73448
73449 if (!value)
73450 return NULL;
73451@@ -693,12 +697,18 @@ posix_acl_from_xattr(struct user_namespace *user_ns,
73452
73453 switch(acl_e->e_tag) {
73454 case ACL_USER_OBJ:
73455+ acl_e->e_perm &= ~((umask & S_IRWXU) >> 6);
73456+ break;
73457 case ACL_GROUP_OBJ:
73458 case ACL_MASK:
73459+ acl_e->e_perm &= ~((umask & S_IRWXG) >> 3);
73460+ break;
73461 case ACL_OTHER:
73462+ acl_e->e_perm &= ~(umask & S_IRWXO);
73463 break;
73464
73465 case ACL_USER:
73466+ acl_e->e_perm &= ~((umask & S_IRWXU) >> 6);
73467 acl_e->e_uid =
73468 make_kuid(user_ns,
73469 le32_to_cpu(entry->e_id));
73470@@ -706,6 +716,7 @@ posix_acl_from_xattr(struct user_namespace *user_ns,
73471 goto fail;
73472 break;
73473 case ACL_GROUP:
73474+ acl_e->e_perm &= ~((umask & S_IRWXG) >> 3);
73475 acl_e->e_gid =
73476 make_kgid(user_ns,
73477 le32_to_cpu(entry->e_id));
73478diff --git a/fs/proc/Kconfig b/fs/proc/Kconfig
73479index 2183fcf..3c32a98 100644
73480--- a/fs/proc/Kconfig
73481+++ b/fs/proc/Kconfig
73482@@ -30,7 +30,7 @@ config PROC_FS
73483
73484 config PROC_KCORE
73485 bool "/proc/kcore support" if !ARM
73486- depends on PROC_FS && MMU
73487+ depends on PROC_FS && MMU && !GRKERNSEC_PROC_ADD
73488 help
73489 Provides a virtual ELF core file of the live kernel. This can
73490 be read with gdb and other ELF tools. No modifications can be
73491@@ -38,8 +38,8 @@ config PROC_KCORE
73492
73493 config PROC_VMCORE
73494 bool "/proc/vmcore support"
73495- depends on PROC_FS && CRASH_DUMP
73496- default y
73497+ depends on PROC_FS && CRASH_DUMP && !GRKERNSEC
73498+ default n
73499 help
73500 Exports the dump image of crashed kernel in ELF format.
73501
73502@@ -63,8 +63,8 @@ config PROC_SYSCTL
73503 limited in memory.
73504
73505 config PROC_PAGE_MONITOR
73506- default y
73507- depends on PROC_FS && MMU
73508+ default n
73509+ depends on PROC_FS && MMU && !GRKERNSEC
73510 bool "Enable /proc page monitoring" if EXPERT
73511 help
73512 Various /proc files exist to monitor process memory utilization:
73513diff --git a/fs/proc/array.c b/fs/proc/array.c
73514index 1295a00..4c91a6b 100644
73515--- a/fs/proc/array.c
73516+++ b/fs/proc/array.c
73517@@ -60,6 +60,7 @@
73518 #include <linux/tty.h>
73519 #include <linux/string.h>
73520 #include <linux/mman.h>
73521+#include <linux/grsecurity.h>
73522 #include <linux/proc_fs.h>
73523 #include <linux/ioport.h>
73524 #include <linux/uaccess.h>
73525@@ -322,6 +323,21 @@ static void task_cpus_allowed(struct seq_file *m, struct task_struct *task)
73526 cpumask_pr_args(&task->cpus_allowed));
73527 }
73528
73529+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
73530+static inline void task_pax(struct seq_file *m, struct task_struct *p)
73531+{
73532+ if (p->mm)
73533+ seq_printf(m, "PaX:\t%c%c%c%c%c\n",
73534+ p->mm->pax_flags & MF_PAX_PAGEEXEC ? 'P' : 'p',
73535+ p->mm->pax_flags & MF_PAX_EMUTRAMP ? 'E' : 'e',
73536+ p->mm->pax_flags & MF_PAX_MPROTECT ? 'M' : 'm',
73537+ p->mm->pax_flags & MF_PAX_RANDMMAP ? 'R' : 'r',
73538+ p->mm->pax_flags & MF_PAX_SEGMEXEC ? 'S' : 's');
73539+ else
73540+ seq_printf(m, "PaX:\t-----\n");
73541+}
73542+#endif
73543+
73544 int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
73545 struct pid *pid, struct task_struct *task)
73546 {
73547@@ -340,9 +356,24 @@ int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
73548 task_cpus_allowed(m, task);
73549 cpuset_task_status_allowed(m, task);
73550 task_context_switch_counts(m, task);
73551+
73552+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
73553+ task_pax(m, task);
73554+#endif
73555+
73556+#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
73557+ task_grsec_rbac(m, task);
73558+#endif
73559+
73560 return 0;
73561 }
73562
73563+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
73564+#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
73565+ (_mm->pax_flags & MF_PAX_RANDMMAP || \
73566+ _mm->pax_flags & MF_PAX_SEGMEXEC))
73567+#endif
73568+
73569 static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
73570 struct pid *pid, struct task_struct *task, int whole)
73571 {
73572@@ -364,6 +395,13 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
73573 char tcomm[sizeof(task->comm)];
73574 unsigned long flags;
73575
73576+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
73577+ if (current->exec_id != m->exec_id) {
73578+ gr_log_badprocpid("stat");
73579+ return 0;
73580+ }
73581+#endif
73582+
73583 state = *get_task_state(task);
73584 vsize = eip = esp = 0;
73585 permitted = ptrace_may_access(task, PTRACE_MODE_READ | PTRACE_MODE_NOAUDIT);
73586@@ -434,6 +472,19 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
73587 gtime = task_gtime(task);
73588 }
73589
73590+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
73591+ if (PAX_RAND_FLAGS(mm)) {
73592+ eip = 0;
73593+ esp = 0;
73594+ wchan = 0;
73595+ }
73596+#endif
73597+#ifdef CONFIG_GRKERNSEC_HIDESYM
73598+ wchan = 0;
73599+ eip =0;
73600+ esp =0;
73601+#endif
73602+
73603 /* scale priority and nice values from timeslices to -20..20 */
73604 /* to make it look like a "normal" Unix priority/nice value */
73605 priority = task_prio(task);
73606@@ -465,9 +516,15 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
73607 seq_put_decimal_ull(m, ' ', vsize);
73608 seq_put_decimal_ull(m, ' ', mm ? get_mm_rss(mm) : 0);
73609 seq_put_decimal_ull(m, ' ', rsslim);
73610+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
73611+ seq_put_decimal_ull(m, ' ', PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->start_code : 1) : 0));
73612+ seq_put_decimal_ull(m, ' ', PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->end_code : 1) : 0));
73613+ seq_put_decimal_ull(m, ' ', PAX_RAND_FLAGS(mm) ? 0 : ((permitted && mm) ? mm->start_stack : 0));
73614+#else
73615 seq_put_decimal_ull(m, ' ', mm ? (permitted ? mm->start_code : 1) : 0);
73616 seq_put_decimal_ull(m, ' ', mm ? (permitted ? mm->end_code : 1) : 0);
73617 seq_put_decimal_ull(m, ' ', (permitted && mm) ? mm->start_stack : 0);
73618+#endif
73619 seq_put_decimal_ull(m, ' ', esp);
73620 seq_put_decimal_ull(m, ' ', eip);
73621 /* The signal information here is obsolete.
73622@@ -489,7 +546,11 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
73623 seq_put_decimal_ull(m, ' ', cputime_to_clock_t(gtime));
73624 seq_put_decimal_ll(m, ' ', cputime_to_clock_t(cgtime));
73625
73626- if (mm && permitted) {
73627+ if (mm && permitted
73628+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
73629+ && !PAX_RAND_FLAGS(mm)
73630+#endif
73631+ ) {
73632 seq_put_decimal_ull(m, ' ', mm->start_data);
73633 seq_put_decimal_ull(m, ' ', mm->end_data);
73634 seq_put_decimal_ull(m, ' ', mm->start_brk);
73635@@ -527,8 +588,15 @@ int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
73636 struct pid *pid, struct task_struct *task)
73637 {
73638 unsigned long size = 0, resident = 0, shared = 0, text = 0, data = 0;
73639- struct mm_struct *mm = get_task_mm(task);
73640+ struct mm_struct *mm;
73641
73642+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
73643+ if (current->exec_id != m->exec_id) {
73644+ gr_log_badprocpid("statm");
73645+ return 0;
73646+ }
73647+#endif
73648+ mm = get_task_mm(task);
73649 if (mm) {
73650 size = task_statm(mm, &shared, &text, &data, &resident);
73651 mmput(mm);
73652@@ -551,6 +619,20 @@ int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
73653 return 0;
73654 }
73655
73656+#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
73657+int proc_pid_ipaddr(struct seq_file *m, struct pid_namespace *ns, struct pid *pid, struct task_struct *task)
73658+{
73659+ unsigned long flags;
73660+ u32 curr_ip = 0;
73661+
73662+ if (lock_task_sighand(task, &flags)) {
73663+ curr_ip = task->signal->curr_ip;
73664+ unlock_task_sighand(task, &flags);
73665+ }
73666+ return seq_printf(m, "%pI4\n", &curr_ip);
73667+}
73668+#endif
73669+
73670 #ifdef CONFIG_CHECKPOINT_RESTORE
73671 static struct pid *
73672 get_children_pid(struct inode *inode, struct pid *pid_prev, loff_t pos)
73673diff --git a/fs/proc/base.c b/fs/proc/base.c
73674index 3f3d7ae..68de109 100644
73675--- a/fs/proc/base.c
73676+++ b/fs/proc/base.c
73677@@ -113,6 +113,14 @@ struct pid_entry {
73678 union proc_op op;
73679 };
73680
73681+struct getdents_callback {
73682+ struct linux_dirent __user * current_dir;
73683+ struct linux_dirent __user * previous;
73684+ struct file * file;
73685+ int count;
73686+ int error;
73687+};
73688+
73689 #define NOD(NAME, MODE, IOP, FOP, OP) { \
73690 .name = (NAME), \
73691 .len = sizeof(NAME) - 1, \
73692@@ -208,12 +216,28 @@ static int proc_pid_cmdline(struct seq_file *m, struct pid_namespace *ns,
73693 return 0;
73694 }
73695
73696+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
73697+#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
73698+ (_mm->pax_flags & MF_PAX_RANDMMAP || \
73699+ _mm->pax_flags & MF_PAX_SEGMEXEC))
73700+#endif
73701+
73702 static int proc_pid_auxv(struct seq_file *m, struct pid_namespace *ns,
73703 struct pid *pid, struct task_struct *task)
73704 {
73705 struct mm_struct *mm = mm_access(task, PTRACE_MODE_READ);
73706 if (mm && !IS_ERR(mm)) {
73707 unsigned int nwords = 0;
73708+
73709+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
73710+ /* allow if we're currently ptracing this task */
73711+ if (PAX_RAND_FLAGS(mm) &&
73712+ (!(task->ptrace & PT_PTRACED) || (task->parent != current))) {
73713+ mmput(mm);
73714+ return 0;
73715+ }
73716+#endif
73717+
73718 do {
73719 nwords += 2;
73720 } while (mm->saved_auxv[nwords - 2] != 0); /* AT_NULL */
73721@@ -225,7 +249,7 @@ static int proc_pid_auxv(struct seq_file *m, struct pid_namespace *ns,
73722 }
73723
73724
73725-#ifdef CONFIG_KALLSYMS
73726+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
73727 /*
73728 * Provides a wchan file via kallsyms in a proper one-value-per-file format.
73729 * Returns the resolved symbol. If that fails, simply return the address.
73730@@ -265,7 +289,7 @@ static void unlock_trace(struct task_struct *task)
73731 mutex_unlock(&task->signal->cred_guard_mutex);
73732 }
73733
73734-#ifdef CONFIG_STACKTRACE
73735+#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
73736
73737 #define MAX_STACK_TRACE_DEPTH 64
73738
73739@@ -456,7 +480,7 @@ static int proc_pid_limits(struct seq_file *m, struct pid_namespace *ns,
73740 return 0;
73741 }
73742
73743-#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
73744+#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
73745 static int proc_pid_syscall(struct seq_file *m, struct pid_namespace *ns,
73746 struct pid *pid, struct task_struct *task)
73747 {
73748@@ -486,7 +510,7 @@ static int proc_pid_syscall(struct seq_file *m, struct pid_namespace *ns,
73749 /************************************************************************/
73750
73751 /* permission checks */
73752-static int proc_fd_access_allowed(struct inode *inode)
73753+static int proc_fd_access_allowed(struct inode *inode, unsigned int log)
73754 {
73755 struct task_struct *task;
73756 int allowed = 0;
73757@@ -496,7 +520,10 @@ static int proc_fd_access_allowed(struct inode *inode)
73758 */
73759 task = get_proc_task(inode);
73760 if (task) {
73761- allowed = ptrace_may_access(task, PTRACE_MODE_READ);
73762+ if (log)
73763+ allowed = ptrace_may_access(task, PTRACE_MODE_READ);
73764+ else
73765+ allowed = ptrace_may_access(task, PTRACE_MODE_READ | PTRACE_MODE_NOAUDIT);
73766 put_task_struct(task);
73767 }
73768 return allowed;
73769@@ -527,10 +554,35 @@ static bool has_pid_permissions(struct pid_namespace *pid,
73770 struct task_struct *task,
73771 int hide_pid_min)
73772 {
73773+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
73774+ return false;
73775+
73776+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
73777+ rcu_read_lock();
73778+ {
73779+ const struct cred *tmpcred = current_cred();
73780+ const struct cred *cred = __task_cred(task);
73781+
73782+ if (uid_eq(tmpcred->uid, GLOBAL_ROOT_UID) || uid_eq(tmpcred->uid, cred->uid)
73783+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
73784+ || in_group_p(grsec_proc_gid)
73785+#endif
73786+ ) {
73787+ rcu_read_unlock();
73788+ return true;
73789+ }
73790+ }
73791+ rcu_read_unlock();
73792+
73793+ if (!pid->hide_pid)
73794+ return false;
73795+#endif
73796+
73797 if (pid->hide_pid < hide_pid_min)
73798 return true;
73799 if (in_group_p(pid->pid_gid))
73800 return true;
73801+
73802 return ptrace_may_access(task, PTRACE_MODE_READ);
73803 }
73804
73805@@ -548,7 +600,11 @@ static int proc_pid_permission(struct inode *inode, int mask)
73806 put_task_struct(task);
73807
73808 if (!has_perms) {
73809+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
73810+ {
73811+#else
73812 if (pid->hide_pid == 2) {
73813+#endif
73814 /*
73815 * Let's make getdents(), stat(), and open()
73816 * consistent with each other. If a process
73817@@ -609,6 +665,10 @@ struct mm_struct *proc_mem_open(struct inode *inode, unsigned int mode)
73818
73819 if (task) {
73820 mm = mm_access(task, mode);
73821+ if (!IS_ERR_OR_NULL(mm) && gr_acl_handle_procpidmem(task)) {
73822+ mmput(mm);
73823+ mm = ERR_PTR(-EPERM);
73824+ }
73825 put_task_struct(task);
73826
73827 if (!IS_ERR_OR_NULL(mm)) {
73828@@ -630,6 +690,11 @@ static int __mem_open(struct inode *inode, struct file *file, unsigned int mode)
73829 return PTR_ERR(mm);
73830
73831 file->private_data = mm;
73832+
73833+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
73834+ file->f_version = current->exec_id;
73835+#endif
73836+
73837 return 0;
73838 }
73839
73840@@ -651,6 +716,17 @@ static ssize_t mem_rw(struct file *file, char __user *buf,
73841 ssize_t copied;
73842 char *page;
73843
73844+#ifdef CONFIG_GRKERNSEC
73845+ if (write)
73846+ return -EPERM;
73847+#endif
73848+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
73849+ if (file->f_version != current->exec_id) {
73850+ gr_log_badprocpid("mem");
73851+ return 0;
73852+ }
73853+#endif
73854+
73855 if (!mm)
73856 return 0;
73857
73858@@ -663,7 +739,7 @@ static ssize_t mem_rw(struct file *file, char __user *buf,
73859 goto free;
73860
73861 while (count > 0) {
73862- int this_len = min_t(int, count, PAGE_SIZE);
73863+ ssize_t this_len = min_t(ssize_t, count, PAGE_SIZE);
73864
73865 if (write && copy_from_user(page, buf, this_len)) {
73866 copied = -EFAULT;
73867@@ -755,6 +831,13 @@ static ssize_t environ_read(struct file *file, char __user *buf,
73868 if (!mm)
73869 return 0;
73870
73871+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
73872+ if (file->f_version != current->exec_id) {
73873+ gr_log_badprocpid("environ");
73874+ return 0;
73875+ }
73876+#endif
73877+
73878 page = (char *)__get_free_page(GFP_TEMPORARY);
73879 if (!page)
73880 return -ENOMEM;
73881@@ -764,7 +847,7 @@ static ssize_t environ_read(struct file *file, char __user *buf,
73882 goto free;
73883 while (count > 0) {
73884 size_t this_len, max_len;
73885- int retval;
73886+ ssize_t retval;
73887
73888 if (src >= (mm->env_end - mm->env_start))
73889 break;
73890@@ -1378,7 +1461,7 @@ static void *proc_pid_follow_link(struct dentry *dentry, struct nameidata *nd)
73891 int error = -EACCES;
73892
73893 /* Are we allowed to snoop on the tasks file descriptors? */
73894- if (!proc_fd_access_allowed(inode))
73895+ if (!proc_fd_access_allowed(inode, 0))
73896 goto out;
73897
73898 error = PROC_I(inode)->op.proc_get_link(dentry, &path);
73899@@ -1422,8 +1505,18 @@ static int proc_pid_readlink(struct dentry * dentry, char __user * buffer, int b
73900 struct path path;
73901
73902 /* Are we allowed to snoop on the tasks file descriptors? */
73903- if (!proc_fd_access_allowed(inode))
73904- goto out;
73905+ /* logging this is needed for learning on chromium to work properly,
73906+ but we don't want to flood the logs from 'ps' which does a readlink
73907+ on /proc/fd/2 of tasks in the listing, nor do we want 'ps' to learn
73908+ CAP_SYS_PTRACE as it's not necessary for its basic functionality
73909+ */
73910+ if (dentry->d_name.name[0] == '2' && dentry->d_name.name[1] == '\0') {
73911+ if (!proc_fd_access_allowed(inode,0))
73912+ goto out;
73913+ } else {
73914+ if (!proc_fd_access_allowed(inode,1))
73915+ goto out;
73916+ }
73917
73918 error = PROC_I(inode)->op.proc_get_link(dentry, &path);
73919 if (error)
73920@@ -1473,7 +1566,11 @@ struct inode *proc_pid_make_inode(struct super_block * sb, struct task_struct *t
73921 rcu_read_lock();
73922 cred = __task_cred(task);
73923 inode->i_uid = cred->euid;
73924+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
73925+ inode->i_gid = grsec_proc_gid;
73926+#else
73927 inode->i_gid = cred->egid;
73928+#endif
73929 rcu_read_unlock();
73930 }
73931 security_task_to_inode(task, inode);
73932@@ -1509,10 +1606,19 @@ int pid_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat)
73933 return -ENOENT;
73934 }
73935 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
73936+#ifdef CONFIG_GRKERNSEC_PROC_USER
73937+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
73938+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
73939+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
73940+#endif
73941 task_dumpable(task)) {
73942 cred = __task_cred(task);
73943 stat->uid = cred->euid;
73944+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
73945+ stat->gid = grsec_proc_gid;
73946+#else
73947 stat->gid = cred->egid;
73948+#endif
73949 }
73950 }
73951 rcu_read_unlock();
73952@@ -1550,11 +1656,20 @@ int pid_revalidate(struct dentry *dentry, unsigned int flags)
73953
73954 if (task) {
73955 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
73956+#ifdef CONFIG_GRKERNSEC_PROC_USER
73957+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
73958+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
73959+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
73960+#endif
73961 task_dumpable(task)) {
73962 rcu_read_lock();
73963 cred = __task_cred(task);
73964 inode->i_uid = cred->euid;
73965+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
73966+ inode->i_gid = grsec_proc_gid;
73967+#else
73968 inode->i_gid = cred->egid;
73969+#endif
73970 rcu_read_unlock();
73971 } else {
73972 inode->i_uid = GLOBAL_ROOT_UID;
73973@@ -2085,6 +2200,9 @@ static struct dentry *proc_pident_lookup(struct inode *dir,
73974 if (!task)
73975 goto out_no_task;
73976
73977+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
73978+ goto out;
73979+
73980 /*
73981 * Yes, it does not scale. And it should not. Don't add
73982 * new entries into /proc/<tgid>/ without very good reasons.
73983@@ -2115,6 +2233,9 @@ static int proc_pident_readdir(struct file *file, struct dir_context *ctx,
73984 if (!task)
73985 return -ENOENT;
73986
73987+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
73988+ goto out;
73989+
73990 if (!dir_emit_dots(file, ctx))
73991 goto out;
73992
73993@@ -2557,7 +2678,7 @@ static const struct pid_entry tgid_base_stuff[] = {
73994 REG("autogroup", S_IRUGO|S_IWUSR, proc_pid_sched_autogroup_operations),
73995 #endif
73996 REG("comm", S_IRUGO|S_IWUSR, proc_pid_set_comm_operations),
73997-#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
73998+#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
73999 ONE("syscall", S_IRUSR, proc_pid_syscall),
74000 #endif
74001 ONE("cmdline", S_IRUGO, proc_pid_cmdline),
74002@@ -2582,10 +2703,10 @@ static const struct pid_entry tgid_base_stuff[] = {
74003 #ifdef CONFIG_SECURITY
74004 DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
74005 #endif
74006-#ifdef CONFIG_KALLSYMS
74007+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
74008 ONE("wchan", S_IRUGO, proc_pid_wchan),
74009 #endif
74010-#ifdef CONFIG_STACKTRACE
74011+#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
74012 ONE("stack", S_IRUSR, proc_pid_stack),
74013 #endif
74014 #ifdef CONFIG_SCHEDSTATS
74015@@ -2619,6 +2740,9 @@ static const struct pid_entry tgid_base_stuff[] = {
74016 #ifdef CONFIG_HARDWALL
74017 ONE("hardwall", S_IRUGO, proc_pid_hardwall),
74018 #endif
74019+#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
74020+ ONE("ipaddr", S_IRUSR, proc_pid_ipaddr),
74021+#endif
74022 #ifdef CONFIG_USER_NS
74023 REG("uid_map", S_IRUGO|S_IWUSR, proc_uid_map_operations),
74024 REG("gid_map", S_IRUGO|S_IWUSR, proc_gid_map_operations),
74025@@ -2751,7 +2875,14 @@ static int proc_pid_instantiate(struct inode *dir,
74026 if (!inode)
74027 goto out;
74028
74029+#ifdef CONFIG_GRKERNSEC_PROC_USER
74030+ inode->i_mode = S_IFDIR|S_IRUSR|S_IXUSR;
74031+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
74032+ inode->i_gid = grsec_proc_gid;
74033+ inode->i_mode = S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP;
74034+#else
74035 inode->i_mode = S_IFDIR|S_IRUGO|S_IXUGO;
74036+#endif
74037 inode->i_op = &proc_tgid_base_inode_operations;
74038 inode->i_fop = &proc_tgid_base_operations;
74039 inode->i_flags|=S_IMMUTABLE;
74040@@ -2789,7 +2920,11 @@ struct dentry *proc_pid_lookup(struct inode *dir, struct dentry * dentry, unsign
74041 if (!task)
74042 goto out;
74043
74044+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
74045+ goto out_put_task;
74046+
74047 result = proc_pid_instantiate(dir, dentry, task, NULL);
74048+out_put_task:
74049 put_task_struct(task);
74050 out:
74051 return ERR_PTR(result);
74052@@ -2903,7 +3038,7 @@ static const struct pid_entry tid_base_stuff[] = {
74053 REG("sched", S_IRUGO|S_IWUSR, proc_pid_sched_operations),
74054 #endif
74055 REG("comm", S_IRUGO|S_IWUSR, proc_pid_set_comm_operations),
74056-#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
74057+#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
74058 ONE("syscall", S_IRUSR, proc_pid_syscall),
74059 #endif
74060 ONE("cmdline", S_IRUGO, proc_pid_cmdline),
74061@@ -2930,10 +3065,10 @@ static const struct pid_entry tid_base_stuff[] = {
74062 #ifdef CONFIG_SECURITY
74063 DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
74064 #endif
74065-#ifdef CONFIG_KALLSYMS
74066+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
74067 ONE("wchan", S_IRUGO, proc_pid_wchan),
74068 #endif
74069-#ifdef CONFIG_STACKTRACE
74070+#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
74071 ONE("stack", S_IRUSR, proc_pid_stack),
74072 #endif
74073 #ifdef CONFIG_SCHEDSTATS
74074diff --git a/fs/proc/cmdline.c b/fs/proc/cmdline.c
74075index cbd82df..c0407d2 100644
74076--- a/fs/proc/cmdline.c
74077+++ b/fs/proc/cmdline.c
74078@@ -23,7 +23,11 @@ static const struct file_operations cmdline_proc_fops = {
74079
74080 static int __init proc_cmdline_init(void)
74081 {
74082+#ifdef CONFIG_GRKERNSEC_PROC_ADD
74083+ proc_create_grsec("cmdline", 0, NULL, &cmdline_proc_fops);
74084+#else
74085 proc_create("cmdline", 0, NULL, &cmdline_proc_fops);
74086+#endif
74087 return 0;
74088 }
74089 fs_initcall(proc_cmdline_init);
74090diff --git a/fs/proc/devices.c b/fs/proc/devices.c
74091index 50493ed..248166b 100644
74092--- a/fs/proc/devices.c
74093+++ b/fs/proc/devices.c
74094@@ -64,7 +64,11 @@ static const struct file_operations proc_devinfo_operations = {
74095
74096 static int __init proc_devices_init(void)
74097 {
74098+#ifdef CONFIG_GRKERNSEC_PROC_ADD
74099+ proc_create_grsec("devices", 0, NULL, &proc_devinfo_operations);
74100+#else
74101 proc_create("devices", 0, NULL, &proc_devinfo_operations);
74102+#endif
74103 return 0;
74104 }
74105 fs_initcall(proc_devices_init);
74106diff --git a/fs/proc/fd.c b/fs/proc/fd.c
74107index 8e5ad83..1f07a8c 100644
74108--- a/fs/proc/fd.c
74109+++ b/fs/proc/fd.c
74110@@ -26,7 +26,8 @@ static int seq_show(struct seq_file *m, void *v)
74111 if (!task)
74112 return -ENOENT;
74113
74114- files = get_files_struct(task);
74115+ if (!gr_acl_handle_procpidmem(task))
74116+ files = get_files_struct(task);
74117 put_task_struct(task);
74118
74119 if (files) {
74120@@ -284,11 +285,21 @@ static struct dentry *proc_lookupfd(struct inode *dir, struct dentry *dentry,
74121 */
74122 int proc_fd_permission(struct inode *inode, int mask)
74123 {
74124+ struct task_struct *task;
74125 int rv = generic_permission(inode, mask);
74126- if (rv == 0)
74127- return 0;
74128+
74129 if (task_tgid(current) == proc_pid(inode))
74130 rv = 0;
74131+
74132+ task = get_proc_task(inode);
74133+ if (task == NULL)
74134+ return rv;
74135+
74136+ if (gr_acl_handle_procpidmem(task))
74137+ rv = -EACCES;
74138+
74139+ put_task_struct(task);
74140+
74141 return rv;
74142 }
74143
74144diff --git a/fs/proc/generic.c b/fs/proc/generic.c
74145index be65b20..2998ba8 100644
74146--- a/fs/proc/generic.c
74147+++ b/fs/proc/generic.c
74148@@ -22,6 +22,7 @@
74149 #include <linux/bitops.h>
74150 #include <linux/spinlock.h>
74151 #include <linux/completion.h>
74152+#include <linux/grsecurity.h>
74153 #include <asm/uaccess.h>
74154
74155 #include "internal.h"
74156@@ -253,6 +254,15 @@ struct dentry *proc_lookup(struct inode *dir, struct dentry *dentry,
74157 return proc_lookup_de(PDE(dir), dir, dentry);
74158 }
74159
74160+struct dentry *proc_lookup_restrict(struct inode *dir, struct dentry *dentry,
74161+ unsigned int flags)
74162+{
74163+ if (gr_proc_is_restricted())
74164+ return ERR_PTR(-EACCES);
74165+
74166+ return proc_lookup_de(PDE(dir), dir, dentry);
74167+}
74168+
74169 /*
74170 * This returns non-zero if at EOF, so that the /proc
74171 * root directory can use this and check if it should
74172@@ -310,6 +320,16 @@ int proc_readdir(struct file *file, struct dir_context *ctx)
74173 return proc_readdir_de(PDE(inode), file, ctx);
74174 }
74175
74176+int proc_readdir_restrict(struct file *file, struct dir_context *ctx)
74177+{
74178+ struct inode *inode = file_inode(file);
74179+
74180+ if (gr_proc_is_restricted())
74181+ return -EACCES;
74182+
74183+ return proc_readdir_de(PDE(inode), file, ctx);
74184+}
74185+
74186 /*
74187 * These are the generic /proc directory operations. They
74188 * use the in-memory "struct proc_dir_entry" tree to parse
74189@@ -321,6 +341,12 @@ static const struct file_operations proc_dir_operations = {
74190 .iterate = proc_readdir,
74191 };
74192
74193+static const struct file_operations proc_dir_restricted_operations = {
74194+ .llseek = generic_file_llseek,
74195+ .read = generic_read_dir,
74196+ .iterate = proc_readdir_restrict,
74197+};
74198+
74199 /*
74200 * proc directories can do almost nothing..
74201 */
74202@@ -330,6 +356,12 @@ static const struct inode_operations proc_dir_inode_operations = {
74203 .setattr = proc_notify_change,
74204 };
74205
74206+static const struct inode_operations proc_dir_restricted_inode_operations = {
74207+ .lookup = proc_lookup_restrict,
74208+ .getattr = proc_getattr,
74209+ .setattr = proc_notify_change,
74210+};
74211+
74212 static int proc_register(struct proc_dir_entry * dir, struct proc_dir_entry * dp)
74213 {
74214 int ret;
74215@@ -441,6 +473,31 @@ struct proc_dir_entry *proc_mkdir_data(const char *name, umode_t mode,
74216 }
74217 EXPORT_SYMBOL_GPL(proc_mkdir_data);
74218
74219+struct proc_dir_entry *proc_mkdir_data_restrict(const char *name, umode_t mode,
74220+ struct proc_dir_entry *parent, void *data)
74221+{
74222+ struct proc_dir_entry *ent;
74223+
74224+ if (mode == 0)
74225+ mode = S_IRUGO | S_IXUGO;
74226+
74227+ ent = __proc_create(&parent, name, S_IFDIR | mode, 2);
74228+ if (ent) {
74229+ ent->data = data;
74230+ ent->restricted = 1;
74231+ ent->proc_fops = &proc_dir_restricted_operations;
74232+ ent->proc_iops = &proc_dir_restricted_inode_operations;
74233+ parent->nlink++;
74234+ if (proc_register(parent, ent) < 0) {
74235+ kfree(ent);
74236+ parent->nlink--;
74237+ ent = NULL;
74238+ }
74239+ }
74240+ return ent;
74241+}
74242+EXPORT_SYMBOL_GPL(proc_mkdir_data_restrict);
74243+
74244 struct proc_dir_entry *proc_mkdir_mode(const char *name, umode_t mode,
74245 struct proc_dir_entry *parent)
74246 {
74247@@ -455,6 +512,13 @@ struct proc_dir_entry *proc_mkdir(const char *name,
74248 }
74249 EXPORT_SYMBOL(proc_mkdir);
74250
74251+struct proc_dir_entry *proc_mkdir_restrict(const char *name,
74252+ struct proc_dir_entry *parent)
74253+{
74254+ return proc_mkdir_data_restrict(name, 0, parent, NULL);
74255+}
74256+EXPORT_SYMBOL(proc_mkdir_restrict);
74257+
74258 struct proc_dir_entry *proc_create_data(const char *name, umode_t mode,
74259 struct proc_dir_entry *parent,
74260 const struct file_operations *proc_fops,
74261diff --git a/fs/proc/inode.c b/fs/proc/inode.c
74262index 7697b66..8d8e541 100644
74263--- a/fs/proc/inode.c
74264+++ b/fs/proc/inode.c
74265@@ -24,11 +24,17 @@
74266 #include <linux/mount.h>
74267 #include <linux/magic.h>
74268 #include <linux/namei.h>
74269+#include <linux/grsecurity.h>
74270
74271 #include <asm/uaccess.h>
74272
74273 #include "internal.h"
74274
74275+#ifdef CONFIG_PROC_SYSCTL
74276+extern const struct inode_operations proc_sys_inode_operations;
74277+extern const struct inode_operations proc_sys_dir_operations;
74278+#endif
74279+
74280 static void proc_evict_inode(struct inode *inode)
74281 {
74282 struct proc_dir_entry *de;
74283@@ -49,6 +55,13 @@ static void proc_evict_inode(struct inode *inode)
74284 RCU_INIT_POINTER(PROC_I(inode)->sysctl, NULL);
74285 sysctl_head_put(head);
74286 }
74287+
74288+#ifdef CONFIG_PROC_SYSCTL
74289+ if (inode->i_op == &proc_sys_inode_operations ||
74290+ inode->i_op == &proc_sys_dir_operations)
74291+ gr_handle_delete(inode->i_ino, inode->i_sb->s_dev);
74292+#endif
74293+
74294 }
74295
74296 static struct kmem_cache * proc_inode_cachep;
74297@@ -426,7 +439,11 @@ struct inode *proc_get_inode(struct super_block *sb, struct proc_dir_entry *de)
74298 if (de->mode) {
74299 inode->i_mode = de->mode;
74300 inode->i_uid = de->uid;
74301+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
74302+ inode->i_gid = grsec_proc_gid;
74303+#else
74304 inode->i_gid = de->gid;
74305+#endif
74306 }
74307 if (de->size)
74308 inode->i_size = de->size;
74309diff --git a/fs/proc/internal.h b/fs/proc/internal.h
74310index c835b94..c9e01a3 100644
74311--- a/fs/proc/internal.h
74312+++ b/fs/proc/internal.h
74313@@ -47,9 +47,10 @@ struct proc_dir_entry {
74314 struct completion *pde_unload_completion;
74315 struct list_head pde_openers; /* who did ->open, but not ->release */
74316 spinlock_t pde_unload_lock; /* proc_fops checks and pde_users bumps */
74317+ u8 restricted; /* a directory in /proc/net that should be restricted via GRKERNSEC_PROC */
74318 u8 namelen;
74319 char name[];
74320-};
74321+} __randomize_layout;
74322
74323 union proc_op {
74324 int (*proc_get_link)(struct dentry *, struct path *);
74325@@ -67,7 +68,7 @@ struct proc_inode {
74326 struct ctl_table *sysctl_entry;
74327 const struct proc_ns_operations *ns_ops;
74328 struct inode vfs_inode;
74329-};
74330+} __randomize_layout;
74331
74332 /*
74333 * General functions
74334@@ -155,6 +156,10 @@ extern int proc_pid_status(struct seq_file *, struct pid_namespace *,
74335 struct pid *, struct task_struct *);
74336 extern int proc_pid_statm(struct seq_file *, struct pid_namespace *,
74337 struct pid *, struct task_struct *);
74338+#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
74339+extern int proc_pid_ipaddr(struct seq_file *, struct pid_namespace *,
74340+ struct pid *, struct task_struct *);
74341+#endif
74342
74343 /*
74344 * base.c
74345@@ -179,9 +184,11 @@ extern bool proc_fill_cache(struct file *, struct dir_context *, const char *, i
74346 * generic.c
74347 */
74348 extern struct dentry *proc_lookup(struct inode *, struct dentry *, unsigned int);
74349+extern struct dentry *proc_lookup_restrict(struct inode *, struct dentry *, unsigned int);
74350 extern struct dentry *proc_lookup_de(struct proc_dir_entry *, struct inode *,
74351 struct dentry *);
74352 extern int proc_readdir(struct file *, struct dir_context *);
74353+extern int proc_readdir_restrict(struct file *, struct dir_context *);
74354 extern int proc_readdir_de(struct proc_dir_entry *, struct file *, struct dir_context *);
74355
74356 static inline struct proc_dir_entry *pde_get(struct proc_dir_entry *pde)
74357diff --git a/fs/proc/interrupts.c b/fs/proc/interrupts.c
74358index a352d57..cb94a5c 100644
74359--- a/fs/proc/interrupts.c
74360+++ b/fs/proc/interrupts.c
74361@@ -47,7 +47,11 @@ static const struct file_operations proc_interrupts_operations = {
74362
74363 static int __init proc_interrupts_init(void)
74364 {
74365+#ifdef CONFIG_GRKERNSEC_PROC_ADD
74366+ proc_create_grsec("interrupts", 0, NULL, &proc_interrupts_operations);
74367+#else
74368 proc_create("interrupts", 0, NULL, &proc_interrupts_operations);
74369+#endif
74370 return 0;
74371 }
74372 fs_initcall(proc_interrupts_init);
74373diff --git a/fs/proc/kcore.c b/fs/proc/kcore.c
74374index 91a4e64..14bf8fa 100644
74375--- a/fs/proc/kcore.c
74376+++ b/fs/proc/kcore.c
74377@@ -483,9 +483,10 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
74378 * the addresses in the elf_phdr on our list.
74379 */
74380 start = kc_offset_to_vaddr(*fpos - elf_buflen);
74381- if ((tsz = (PAGE_SIZE - (start & ~PAGE_MASK))) > buflen)
74382+ tsz = PAGE_SIZE - (start & ~PAGE_MASK);
74383+ if (tsz > buflen)
74384 tsz = buflen;
74385-
74386+
74387 while (buflen) {
74388 struct kcore_list *m;
74389
74390@@ -515,19 +516,20 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
74391 } else {
74392 if (kern_addr_valid(start)) {
74393 unsigned long n;
74394+ char *elf_buf;
74395+ mm_segment_t oldfs;
74396
74397- n = copy_to_user(buffer, (char *)start, tsz);
74398- /*
74399- * We cannot distinguish between fault on source
74400- * and fault on destination. When this happens
74401- * we clear too and hope it will trigger the
74402- * EFAULT again.
74403- */
74404- if (n) {
74405- if (clear_user(buffer + tsz - n,
74406- n))
74407- return -EFAULT;
74408- }
74409+ elf_buf = kzalloc(tsz, GFP_KERNEL);
74410+ if (!elf_buf)
74411+ return -ENOMEM;
74412+ oldfs = get_fs();
74413+ set_fs(KERNEL_DS);
74414+ n = __copy_from_user(elf_buf, (const void __user *)start, tsz);
74415+ set_fs(oldfs);
74416+ n = copy_to_user(buffer, elf_buf, tsz);
74417+ kfree(elf_buf);
74418+ if (n)
74419+ return -EFAULT;
74420 } else {
74421 if (clear_user(buffer, tsz))
74422 return -EFAULT;
74423@@ -547,6 +549,9 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
74424
74425 static int open_kcore(struct inode *inode, struct file *filp)
74426 {
74427+#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
74428+ return -EPERM;
74429+#endif
74430 if (!capable(CAP_SYS_RAWIO))
74431 return -EPERM;
74432 if (kcore_need_update)
74433@@ -580,7 +585,7 @@ static int __meminit kcore_callback(struct notifier_block *self,
74434 return NOTIFY_OK;
74435 }
74436
74437-static struct notifier_block kcore_callback_nb __meminitdata = {
74438+static struct notifier_block kcore_callback_nb __meminitconst = {
74439 .notifier_call = kcore_callback,
74440 .priority = 0,
74441 };
74442diff --git a/fs/proc/meminfo.c b/fs/proc/meminfo.c
74443index d3ebf2e..6ad42d1 100644
74444--- a/fs/proc/meminfo.c
74445+++ b/fs/proc/meminfo.c
74446@@ -194,7 +194,7 @@ static int meminfo_proc_show(struct seq_file *m, void *v)
74447 vmi.used >> 10,
74448 vmi.largest_chunk >> 10
74449 #ifdef CONFIG_MEMORY_FAILURE
74450- , atomic_long_read(&num_poisoned_pages) << (PAGE_SHIFT - 10)
74451+ , atomic_long_read_unchecked(&num_poisoned_pages) << (PAGE_SHIFT - 10)
74452 #endif
74453 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
74454 , K(global_page_state(NR_ANON_TRANSPARENT_HUGEPAGES) *
74455diff --git a/fs/proc/nommu.c b/fs/proc/nommu.c
74456index d4a3574..b421ce9 100644
74457--- a/fs/proc/nommu.c
74458+++ b/fs/proc/nommu.c
74459@@ -64,7 +64,7 @@ static int nommu_region_show(struct seq_file *m, struct vm_region *region)
74460
74461 if (file) {
74462 seq_pad(m, ' ');
74463- seq_path(m, &file->f_path, "");
74464+ seq_path(m, &file->f_path, "\n\\");
74465 }
74466
74467 seq_putc(m, '\n');
74468diff --git a/fs/proc/proc_net.c b/fs/proc/proc_net.c
74469index 1bde894..22ac7eb 100644
74470--- a/fs/proc/proc_net.c
74471+++ b/fs/proc/proc_net.c
74472@@ -23,9 +23,27 @@
74473 #include <linux/nsproxy.h>
74474 #include <net/net_namespace.h>
74475 #include <linux/seq_file.h>
74476+#include <linux/grsecurity.h>
74477
74478 #include "internal.h"
74479
74480+#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
74481+static struct seq_operations *ipv6_seq_ops_addr;
74482+
74483+void register_ipv6_seq_ops_addr(struct seq_operations *addr)
74484+{
74485+ ipv6_seq_ops_addr = addr;
74486+}
74487+
74488+void unregister_ipv6_seq_ops_addr(void)
74489+{
74490+ ipv6_seq_ops_addr = NULL;
74491+}
74492+
74493+EXPORT_SYMBOL_GPL(register_ipv6_seq_ops_addr);
74494+EXPORT_SYMBOL_GPL(unregister_ipv6_seq_ops_addr);
74495+#endif
74496+
74497 static inline struct net *PDE_NET(struct proc_dir_entry *pde)
74498 {
74499 return pde->parent->data;
74500@@ -36,6 +54,8 @@ static struct net *get_proc_net(const struct inode *inode)
74501 return maybe_get_net(PDE_NET(PDE(inode)));
74502 }
74503
74504+extern const struct seq_operations dev_seq_ops;
74505+
74506 int seq_open_net(struct inode *ino, struct file *f,
74507 const struct seq_operations *ops, int size)
74508 {
74509@@ -44,6 +64,14 @@ int seq_open_net(struct inode *ino, struct file *f,
74510
74511 BUG_ON(size < sizeof(*p));
74512
74513+ /* only permit access to /proc/net/dev */
74514+ if (
74515+#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
74516+ ops != ipv6_seq_ops_addr &&
74517+#endif
74518+ ops != &dev_seq_ops && gr_proc_is_restricted())
74519+ return -EACCES;
74520+
74521 net = get_proc_net(ino);
74522 if (net == NULL)
74523 return -ENXIO;
74524@@ -66,6 +94,9 @@ int single_open_net(struct inode *inode, struct file *file,
74525 int err;
74526 struct net *net;
74527
74528+ if (gr_proc_is_restricted())
74529+ return -EACCES;
74530+
74531 err = -ENXIO;
74532 net = get_proc_net(inode);
74533 if (net == NULL)
74534diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c
74535index f92d5dd..26398ac 100644
74536--- a/fs/proc/proc_sysctl.c
74537+++ b/fs/proc/proc_sysctl.c
74538@@ -11,13 +11,21 @@
74539 #include <linux/namei.h>
74540 #include <linux/mm.h>
74541 #include <linux/module.h>
74542+#include <linux/nsproxy.h>
74543+#ifdef CONFIG_GRKERNSEC
74544+#include <net/net_namespace.h>
74545+#endif
74546 #include "internal.h"
74547
74548+extern int gr_handle_chroot_sysctl(const int op);
74549+extern int gr_handle_sysctl_mod(const char *dirname, const char *name,
74550+ const int op);
74551+
74552 static const struct dentry_operations proc_sys_dentry_operations;
74553 static const struct file_operations proc_sys_file_operations;
74554-static const struct inode_operations proc_sys_inode_operations;
74555+const struct inode_operations proc_sys_inode_operations;
74556 static const struct file_operations proc_sys_dir_file_operations;
74557-static const struct inode_operations proc_sys_dir_operations;
74558+const struct inode_operations proc_sys_dir_operations;
74559
74560 void proc_sys_poll_notify(struct ctl_table_poll *poll)
74561 {
74562@@ -467,6 +475,9 @@ static struct dentry *proc_sys_lookup(struct inode *dir, struct dentry *dentry,
74563
74564 err = NULL;
74565 d_set_d_op(dentry, &proc_sys_dentry_operations);
74566+
74567+ gr_handle_proc_create(dentry, inode);
74568+
74569 d_add(dentry, inode);
74570
74571 out:
74572@@ -482,6 +493,7 @@ static ssize_t proc_sys_call_handler(struct file *filp, void __user *buf,
74573 struct inode *inode = file_inode(filp);
74574 struct ctl_table_header *head = grab_header(inode);
74575 struct ctl_table *table = PROC_I(inode)->sysctl_entry;
74576+ int op = write ? MAY_WRITE : MAY_READ;
74577 ssize_t error;
74578 size_t res;
74579
74580@@ -493,7 +505,7 @@ static ssize_t proc_sys_call_handler(struct file *filp, void __user *buf,
74581 * and won't be until we finish.
74582 */
74583 error = -EPERM;
74584- if (sysctl_perm(head, table, write ? MAY_WRITE : MAY_READ))
74585+ if (sysctl_perm(head, table, op))
74586 goto out;
74587
74588 /* if that can happen at all, it should be -EINVAL, not -EISDIR */
74589@@ -501,6 +513,27 @@ static ssize_t proc_sys_call_handler(struct file *filp, void __user *buf,
74590 if (!table->proc_handler)
74591 goto out;
74592
74593+#ifdef CONFIG_GRKERNSEC
74594+ error = -EPERM;
74595+ if (gr_handle_chroot_sysctl(op))
74596+ goto out;
74597+ dget(filp->f_path.dentry);
74598+ if (gr_handle_sysctl_mod(filp->f_path.dentry->d_parent->d_name.name, table->procname, op)) {
74599+ dput(filp->f_path.dentry);
74600+ goto out;
74601+ }
74602+ dput(filp->f_path.dentry);
74603+ if (!gr_acl_handle_open(filp->f_path.dentry, filp->f_path.mnt, op))
74604+ goto out;
74605+ if (write) {
74606+ if (current->nsproxy->net_ns != table->extra2) {
74607+ if (!capable(CAP_SYS_ADMIN))
74608+ goto out;
74609+ } else if (!ns_capable(current->nsproxy->net_ns->user_ns, CAP_NET_ADMIN))
74610+ goto out;
74611+ }
74612+#endif
74613+
74614 /* careful: calling conventions are nasty here */
74615 res = count;
74616 error = table->proc_handler(table, write, buf, &res, ppos);
74617@@ -598,6 +631,9 @@ static bool proc_sys_fill_cache(struct file *file,
74618 return false;
74619 } else {
74620 d_set_d_op(child, &proc_sys_dentry_operations);
74621+
74622+ gr_handle_proc_create(child, inode);
74623+
74624 d_add(child, inode);
74625 }
74626 } else {
74627@@ -641,6 +677,9 @@ static int scan(struct ctl_table_header *head, struct ctl_table *table,
74628 if ((*pos)++ < ctx->pos)
74629 return true;
74630
74631+ if (!gr_acl_handle_hidden_file(file->f_path.dentry, file->f_path.mnt))
74632+ return 0;
74633+
74634 if (unlikely(S_ISLNK(table->mode)))
74635 res = proc_sys_link_fill_cache(file, ctx, head, table);
74636 else
74637@@ -734,6 +773,9 @@ static int proc_sys_getattr(struct vfsmount *mnt, struct dentry *dentry, struct
74638 if (IS_ERR(head))
74639 return PTR_ERR(head);
74640
74641+ if (table && !gr_acl_handle_hidden_file(dentry, mnt))
74642+ return -ENOENT;
74643+
74644 generic_fillattr(inode, stat);
74645 if (table)
74646 stat->mode = (stat->mode & S_IFMT) | table->mode;
74647@@ -756,13 +798,13 @@ static const struct file_operations proc_sys_dir_file_operations = {
74648 .llseek = generic_file_llseek,
74649 };
74650
74651-static const struct inode_operations proc_sys_inode_operations = {
74652+const struct inode_operations proc_sys_inode_operations = {
74653 .permission = proc_sys_permission,
74654 .setattr = proc_sys_setattr,
74655 .getattr = proc_sys_getattr,
74656 };
74657
74658-static const struct inode_operations proc_sys_dir_operations = {
74659+const struct inode_operations proc_sys_dir_operations = {
74660 .lookup = proc_sys_lookup,
74661 .permission = proc_sys_permission,
74662 .setattr = proc_sys_setattr,
74663@@ -839,7 +881,7 @@ static struct ctl_dir *find_subdir(struct ctl_dir *dir,
74664 static struct ctl_dir *new_dir(struct ctl_table_set *set,
74665 const char *name, int namelen)
74666 {
74667- struct ctl_table *table;
74668+ ctl_table_no_const *table;
74669 struct ctl_dir *new;
74670 struct ctl_node *node;
74671 char *new_name;
74672@@ -851,7 +893,7 @@ static struct ctl_dir *new_dir(struct ctl_table_set *set,
74673 return NULL;
74674
74675 node = (struct ctl_node *)(new + 1);
74676- table = (struct ctl_table *)(node + 1);
74677+ table = (ctl_table_no_const *)(node + 1);
74678 new_name = (char *)(table + 2);
74679 memcpy(new_name, name, namelen);
74680 new_name[namelen] = '\0';
74681@@ -1020,7 +1062,8 @@ static int sysctl_check_table(const char *path, struct ctl_table *table)
74682 static struct ctl_table_header *new_links(struct ctl_dir *dir, struct ctl_table *table,
74683 struct ctl_table_root *link_root)
74684 {
74685- struct ctl_table *link_table, *entry, *link;
74686+ ctl_table_no_const *link_table, *link;
74687+ struct ctl_table *entry;
74688 struct ctl_table_header *links;
74689 struct ctl_node *node;
74690 char *link_name;
74691@@ -1043,7 +1086,7 @@ static struct ctl_table_header *new_links(struct ctl_dir *dir, struct ctl_table
74692 return NULL;
74693
74694 node = (struct ctl_node *)(links + 1);
74695- link_table = (struct ctl_table *)(node + nr_entries);
74696+ link_table = (ctl_table_no_const *)(node + nr_entries);
74697 link_name = (char *)&link_table[nr_entries + 1];
74698
74699 for (link = link_table, entry = table; entry->procname; link++, entry++) {
74700@@ -1291,8 +1334,8 @@ static int register_leaf_sysctl_tables(const char *path, char *pos,
74701 struct ctl_table_header ***subheader, struct ctl_table_set *set,
74702 struct ctl_table *table)
74703 {
74704- struct ctl_table *ctl_table_arg = NULL;
74705- struct ctl_table *entry, *files;
74706+ ctl_table_no_const *ctl_table_arg = NULL, *files = NULL;
74707+ struct ctl_table *entry;
74708 int nr_files = 0;
74709 int nr_dirs = 0;
74710 int err = -ENOMEM;
74711@@ -1304,10 +1347,9 @@ static int register_leaf_sysctl_tables(const char *path, char *pos,
74712 nr_files++;
74713 }
74714
74715- files = table;
74716 /* If there are mixed files and directories we need a new table */
74717 if (nr_dirs && nr_files) {
74718- struct ctl_table *new;
74719+ ctl_table_no_const *new;
74720 files = kzalloc(sizeof(struct ctl_table) * (nr_files + 1),
74721 GFP_KERNEL);
74722 if (!files)
74723@@ -1325,7 +1367,7 @@ static int register_leaf_sysctl_tables(const char *path, char *pos,
74724 /* Register everything except a directory full of subdirectories */
74725 if (nr_files || !nr_dirs) {
74726 struct ctl_table_header *header;
74727- header = __register_sysctl_table(set, path, files);
74728+ header = __register_sysctl_table(set, path, files ? files : table);
74729 if (!header) {
74730 kfree(ctl_table_arg);
74731 goto out;
74732diff --git a/fs/proc/root.c b/fs/proc/root.c
74733index e74ac9f..35e89f4 100644
74734--- a/fs/proc/root.c
74735+++ b/fs/proc/root.c
74736@@ -188,7 +188,15 @@ void __init proc_root_init(void)
74737 proc_mkdir("openprom", NULL);
74738 #endif
74739 proc_tty_init();
74740+#ifdef CONFIG_GRKERNSEC_PROC_ADD
74741+#ifdef CONFIG_GRKERNSEC_PROC_USER
74742+ proc_mkdir_mode("bus", S_IRUSR | S_IXUSR, NULL);
74743+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
74744+ proc_mkdir_mode("bus", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
74745+#endif
74746+#else
74747 proc_mkdir("bus", NULL);
74748+#endif
74749 proc_sys_init();
74750 }
74751
74752diff --git a/fs/proc/stat.c b/fs/proc/stat.c
74753index 510413eb..34d9a8c 100644
74754--- a/fs/proc/stat.c
74755+++ b/fs/proc/stat.c
74756@@ -11,6 +11,7 @@
74757 #include <linux/irqnr.h>
74758 #include <linux/cputime.h>
74759 #include <linux/tick.h>
74760+#include <linux/grsecurity.h>
74761
74762 #ifndef arch_irq_stat_cpu
74763 #define arch_irq_stat_cpu(cpu) 0
74764@@ -87,6 +88,18 @@ static int show_stat(struct seq_file *p, void *v)
74765 u64 sum_softirq = 0;
74766 unsigned int per_softirq_sums[NR_SOFTIRQS] = {0};
74767 struct timespec boottime;
74768+ int unrestricted = 1;
74769+
74770+#ifdef CONFIG_GRKERNSEC_PROC_ADD
74771+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
74772+ if (!uid_eq(current_uid(), GLOBAL_ROOT_UID)
74773+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
74774+ && !in_group_p(grsec_proc_gid)
74775+#endif
74776+ )
74777+ unrestricted = 0;
74778+#endif
74779+#endif
74780
74781 user = nice = system = idle = iowait =
74782 irq = softirq = steal = 0;
74783@@ -99,23 +112,25 @@ static int show_stat(struct seq_file *p, void *v)
74784 nice += kcpustat_cpu(i).cpustat[CPUTIME_NICE];
74785 system += kcpustat_cpu(i).cpustat[CPUTIME_SYSTEM];
74786 idle += get_idle_time(i);
74787- iowait += get_iowait_time(i);
74788- irq += kcpustat_cpu(i).cpustat[CPUTIME_IRQ];
74789- softirq += kcpustat_cpu(i).cpustat[CPUTIME_SOFTIRQ];
74790- steal += kcpustat_cpu(i).cpustat[CPUTIME_STEAL];
74791- guest += kcpustat_cpu(i).cpustat[CPUTIME_GUEST];
74792- guest_nice += kcpustat_cpu(i).cpustat[CPUTIME_GUEST_NICE];
74793- sum += kstat_cpu_irqs_sum(i);
74794- sum += arch_irq_stat_cpu(i);
74795+ if (unrestricted) {
74796+ iowait += get_iowait_time(i);
74797+ irq += kcpustat_cpu(i).cpustat[CPUTIME_IRQ];
74798+ softirq += kcpustat_cpu(i).cpustat[CPUTIME_SOFTIRQ];
74799+ steal += kcpustat_cpu(i).cpustat[CPUTIME_STEAL];
74800+ guest += kcpustat_cpu(i).cpustat[CPUTIME_GUEST];
74801+ guest_nice += kcpustat_cpu(i).cpustat[CPUTIME_GUEST_NICE];
74802+ sum += kstat_cpu_irqs_sum(i);
74803+ sum += arch_irq_stat_cpu(i);
74804+ for (j = 0; j < NR_SOFTIRQS; j++) {
74805+ unsigned int softirq_stat = kstat_softirqs_cpu(j, i);
74806
74807- for (j = 0; j < NR_SOFTIRQS; j++) {
74808- unsigned int softirq_stat = kstat_softirqs_cpu(j, i);
74809-
74810- per_softirq_sums[j] += softirq_stat;
74811- sum_softirq += softirq_stat;
74812+ per_softirq_sums[j] += softirq_stat;
74813+ sum_softirq += softirq_stat;
74814+ }
74815 }
74816 }
74817- sum += arch_irq_stat();
74818+ if (unrestricted)
74819+ sum += arch_irq_stat();
74820
74821 seq_puts(p, "cpu ");
74822 seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(user));
74823@@ -136,12 +151,14 @@ static int show_stat(struct seq_file *p, void *v)
74824 nice = kcpustat_cpu(i).cpustat[CPUTIME_NICE];
74825 system = kcpustat_cpu(i).cpustat[CPUTIME_SYSTEM];
74826 idle = get_idle_time(i);
74827- iowait = get_iowait_time(i);
74828- irq = kcpustat_cpu(i).cpustat[CPUTIME_IRQ];
74829- softirq = kcpustat_cpu(i).cpustat[CPUTIME_SOFTIRQ];
74830- steal = kcpustat_cpu(i).cpustat[CPUTIME_STEAL];
74831- guest = kcpustat_cpu(i).cpustat[CPUTIME_GUEST];
74832- guest_nice = kcpustat_cpu(i).cpustat[CPUTIME_GUEST_NICE];
74833+ if (unrestricted) {
74834+ iowait = get_iowait_time(i);
74835+ irq = kcpustat_cpu(i).cpustat[CPUTIME_IRQ];
74836+ softirq = kcpustat_cpu(i).cpustat[CPUTIME_SOFTIRQ];
74837+ steal = kcpustat_cpu(i).cpustat[CPUTIME_STEAL];
74838+ guest = kcpustat_cpu(i).cpustat[CPUTIME_GUEST];
74839+ guest_nice = kcpustat_cpu(i).cpustat[CPUTIME_GUEST_NICE];
74840+ }
74841 seq_printf(p, "cpu%d", i);
74842 seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(user));
74843 seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(nice));
74844@@ -159,7 +176,7 @@ static int show_stat(struct seq_file *p, void *v)
74845
74846 /* sum again ? it could be updated? */
74847 for_each_irq_nr(j)
74848- seq_put_decimal_ull(p, ' ', kstat_irqs_usr(j));
74849+ seq_put_decimal_ull(p, ' ', unrestricted ? kstat_irqs_usr(j) : 0ULL);
74850
74851 seq_printf(p,
74852 "\nctxt %llu\n"
74853@@ -167,11 +184,11 @@ static int show_stat(struct seq_file *p, void *v)
74854 "processes %lu\n"
74855 "procs_running %lu\n"
74856 "procs_blocked %lu\n",
74857- nr_context_switches(),
74858+ unrestricted ? nr_context_switches() : 0ULL,
74859 (unsigned long)jif,
74860- total_forks,
74861- nr_running(),
74862- nr_iowait());
74863+ unrestricted ? total_forks : 0UL,
74864+ unrestricted ? nr_running() : 0UL,
74865+ unrestricted ? nr_iowait() : 0UL);
74866
74867 seq_printf(p, "softirq %llu", (unsigned long long)sum_softirq);
74868
74869diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
74870index 6dee68d..1b4add0 100644
74871--- a/fs/proc/task_mmu.c
74872+++ b/fs/proc/task_mmu.c
74873@@ -13,12 +13,19 @@
74874 #include <linux/swap.h>
74875 #include <linux/swapops.h>
74876 #include <linux/mmu_notifier.h>
74877+#include <linux/grsecurity.h>
74878
74879 #include <asm/elf.h>
74880 #include <asm/uaccess.h>
74881 #include <asm/tlbflush.h>
74882 #include "internal.h"
74883
74884+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
74885+#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
74886+ (_mm->pax_flags & MF_PAX_RANDMMAP || \
74887+ _mm->pax_flags & MF_PAX_SEGMEXEC))
74888+#endif
74889+
74890 void task_mem(struct seq_file *m, struct mm_struct *mm)
74891 {
74892 unsigned long data, text, lib, swap, ptes, pmds;
74893@@ -57,8 +64,13 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
74894 "VmLib:\t%8lu kB\n"
74895 "VmPTE:\t%8lu kB\n"
74896 "VmPMD:\t%8lu kB\n"
74897- "VmSwap:\t%8lu kB\n",
74898- hiwater_vm << (PAGE_SHIFT-10),
74899+ "VmSwap:\t%8lu kB\n"
74900+
74901+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
74902+ "CsBase:\t%8lx\nCsLim:\t%8lx\n"
74903+#endif
74904+
74905+ ,hiwater_vm << (PAGE_SHIFT-10),
74906 total_vm << (PAGE_SHIFT-10),
74907 mm->locked_vm << (PAGE_SHIFT-10),
74908 mm->pinned_vm << (PAGE_SHIFT-10),
74909@@ -68,7 +80,19 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
74910 mm->stack_vm << (PAGE_SHIFT-10), text, lib,
74911 ptes >> 10,
74912 pmds >> 10,
74913- swap << (PAGE_SHIFT-10));
74914+ swap << (PAGE_SHIFT-10)
74915+
74916+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
74917+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
74918+ , PAX_RAND_FLAGS(mm) ? 0 : mm->context.user_cs_base
74919+ , PAX_RAND_FLAGS(mm) ? 0 : mm->context.user_cs_limit
74920+#else
74921+ , mm->context.user_cs_base
74922+ , mm->context.user_cs_limit
74923+#endif
74924+#endif
74925+
74926+ );
74927 }
74928
74929 unsigned long task_vsize(struct mm_struct *mm)
74930@@ -285,13 +309,13 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
74931 pgoff = ((loff_t)vma->vm_pgoff) << PAGE_SHIFT;
74932 }
74933
74934- /* We don't show the stack guard page in /proc/maps */
74935+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
74936+ start = PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_start;
74937+ end = PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_end;
74938+#else
74939 start = vma->vm_start;
74940- if (stack_guard_page_start(vma, start))
74941- start += PAGE_SIZE;
74942 end = vma->vm_end;
74943- if (stack_guard_page_end(vma, end))
74944- end -= PAGE_SIZE;
74945+#endif
74946
74947 seq_setwidth(m, 25 + sizeof(void *) * 6 - 1);
74948 seq_printf(m, "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu ",
74949@@ -301,7 +325,11 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
74950 flags & VM_WRITE ? 'w' : '-',
74951 flags & VM_EXEC ? 'x' : '-',
74952 flags & VM_MAYSHARE ? 's' : 'p',
74953+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
74954+ PAX_RAND_FLAGS(mm) ? 0UL : pgoff,
74955+#else
74956 pgoff,
74957+#endif
74958 MAJOR(dev), MINOR(dev), ino);
74959
74960 /*
74961@@ -310,7 +338,7 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
74962 */
74963 if (file) {
74964 seq_pad(m, ' ');
74965- seq_path(m, &file->f_path, "\n");
74966+ seq_path(m, &file->f_path, "\n\\");
74967 goto done;
74968 }
74969
74970@@ -341,8 +369,9 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
74971 * Thread stack in /proc/PID/task/TID/maps or
74972 * the main process stack.
74973 */
74974- if (!is_pid || (vma->vm_start <= mm->start_stack &&
74975- vma->vm_end >= mm->start_stack)) {
74976+ if (!is_pid || (vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP)) ||
74977+ (vma->vm_start <= mm->start_stack &&
74978+ vma->vm_end >= mm->start_stack)) {
74979 name = "[stack]";
74980 } else {
74981 /* Thread stack in /proc/PID/maps */
74982@@ -362,6 +391,12 @@ done:
74983
74984 static int show_map(struct seq_file *m, void *v, int is_pid)
74985 {
74986+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
74987+ if (current->exec_id != m->exec_id) {
74988+ gr_log_badprocpid("maps");
74989+ return 0;
74990+ }
74991+#endif
74992 show_map_vma(m, v, is_pid);
74993 m_cache_vma(m, v);
74994 return 0;
74995@@ -620,9 +655,18 @@ static int show_smap(struct seq_file *m, void *v, int is_pid)
74996 .private = &mss,
74997 };
74998
74999+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
75000+ if (current->exec_id != m->exec_id) {
75001+ gr_log_badprocpid("smaps");
75002+ return 0;
75003+ }
75004+#endif
75005 memset(&mss, 0, sizeof mss);
75006- /* mmap_sem is held in m_start */
75007- walk_page_vma(vma, &smaps_walk);
75008+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
75009+ if (!PAX_RAND_FLAGS(vma->vm_mm))
75010+#endif
75011+ /* mmap_sem is held in m_start */
75012+ walk_page_vma(vma, &smaps_walk);
75013
75014 show_map_vma(m, vma, is_pid);
75015
75016@@ -641,7 +685,11 @@ static int show_smap(struct seq_file *m, void *v, int is_pid)
75017 "KernelPageSize: %8lu kB\n"
75018 "MMUPageSize: %8lu kB\n"
75019 "Locked: %8lu kB\n",
75020+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
75021+ PAX_RAND_FLAGS(vma->vm_mm) ? 0UL : (vma->vm_end - vma->vm_start) >> 10,
75022+#else
75023 (vma->vm_end - vma->vm_start) >> 10,
75024+#endif
75025 mss.resident >> 10,
75026 (unsigned long)(mss.pss >> (10 + PSS_SHIFT)),
75027 mss.shared_clean >> 10,
75028@@ -1491,6 +1539,13 @@ static int show_numa_map(struct seq_file *m, void *v, int is_pid)
75029 char buffer[64];
75030 int nid;
75031
75032+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
75033+ if (current->exec_id != m->exec_id) {
75034+ gr_log_badprocpid("numa_maps");
75035+ return 0;
75036+ }
75037+#endif
75038+
75039 if (!mm)
75040 return 0;
75041
75042@@ -1505,11 +1560,15 @@ static int show_numa_map(struct seq_file *m, void *v, int is_pid)
75043 mpol_to_str(buffer, sizeof(buffer), proc_priv->task_mempolicy);
75044 }
75045
75046+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
75047+ seq_printf(m, "%08lx %s", PAX_RAND_FLAGS(vma->vm_mm) ? 0UL : vma->vm_start, buffer);
75048+#else
75049 seq_printf(m, "%08lx %s", vma->vm_start, buffer);
75050+#endif
75051
75052 if (file) {
75053 seq_puts(m, " file=");
75054- seq_path(m, &file->f_path, "\n\t= ");
75055+ seq_path(m, &file->f_path, "\n\t\\= ");
75056 } else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) {
75057 seq_puts(m, " heap");
75058 } else {
75059diff --git a/fs/proc/task_nommu.c b/fs/proc/task_nommu.c
75060index 599ec2e..f1413ae 100644
75061--- a/fs/proc/task_nommu.c
75062+++ b/fs/proc/task_nommu.c
75063@@ -51,7 +51,7 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
75064 else
75065 bytes += kobjsize(mm);
75066
75067- if (current->fs && current->fs->users > 1)
75068+ if (current->fs && atomic_read(&current->fs->users) > 1)
75069 sbytes += kobjsize(current->fs);
75070 else
75071 bytes += kobjsize(current->fs);
75072@@ -180,7 +180,7 @@ static int nommu_vma_show(struct seq_file *m, struct vm_area_struct *vma,
75073
75074 if (file) {
75075 seq_pad(m, ' ');
75076- seq_path(m, &file->f_path, "");
75077+ seq_path(m, &file->f_path, "\n\\");
75078 } else if (mm) {
75079 pid_t tid = pid_of_stack(priv, vma, is_pid);
75080
75081diff --git a/fs/proc/vmcore.c b/fs/proc/vmcore.c
75082index 4e61388..1a2523d 100644
75083--- a/fs/proc/vmcore.c
75084+++ b/fs/proc/vmcore.c
75085@@ -105,9 +105,13 @@ static ssize_t read_from_oldmem(char *buf, size_t count,
75086 nr_bytes = count;
75087
75088 /* If pfn is not ram, return zeros for sparse dump files */
75089- if (pfn_is_ram(pfn) == 0)
75090- memset(buf, 0, nr_bytes);
75091- else {
75092+ if (pfn_is_ram(pfn) == 0) {
75093+ if (userbuf) {
75094+ if (clear_user((char __force_user *)buf, nr_bytes))
75095+ return -EFAULT;
75096+ } else
75097+ memset(buf, 0, nr_bytes);
75098+ } else {
75099 tmp = copy_oldmem_page(pfn, buf, nr_bytes,
75100 offset, userbuf);
75101 if (tmp < 0)
75102@@ -170,7 +174,7 @@ int __weak remap_oldmem_pfn_range(struct vm_area_struct *vma,
75103 static int copy_to(void *target, void *src, size_t size, int userbuf)
75104 {
75105 if (userbuf) {
75106- if (copy_to_user((char __user *) target, src, size))
75107+ if (copy_to_user((char __force_user *) target, src, size))
75108 return -EFAULT;
75109 } else {
75110 memcpy(target, src, size);
75111@@ -233,7 +237,7 @@ static ssize_t __read_vmcore(char *buffer, size_t buflen, loff_t *fpos,
75112 if (*fpos < m->offset + m->size) {
75113 tsz = min_t(size_t, m->offset + m->size - *fpos, buflen);
75114 start = m->paddr + *fpos - m->offset;
75115- tmp = read_from_oldmem(buffer, tsz, &start, userbuf);
75116+ tmp = read_from_oldmem((char __force_kernel *)buffer, tsz, &start, userbuf);
75117 if (tmp < 0)
75118 return tmp;
75119 buflen -= tsz;
75120@@ -253,7 +257,7 @@ static ssize_t __read_vmcore(char *buffer, size_t buflen, loff_t *fpos,
75121 static ssize_t read_vmcore(struct file *file, char __user *buffer,
75122 size_t buflen, loff_t *fpos)
75123 {
75124- return __read_vmcore((__force char *) buffer, buflen, fpos, 1);
75125+ return __read_vmcore((__force_kernel char *) buffer, buflen, fpos, 1);
75126 }
75127
75128 /*
75129diff --git a/fs/qnx6/qnx6.h b/fs/qnx6/qnx6.h
75130index d3fb2b6..43a8140 100644
75131--- a/fs/qnx6/qnx6.h
75132+++ b/fs/qnx6/qnx6.h
75133@@ -74,7 +74,7 @@ enum {
75134 BYTESEX_BE,
75135 };
75136
75137-static inline __u64 fs64_to_cpu(struct qnx6_sb_info *sbi, __fs64 n)
75138+static inline __u64 __intentional_overflow(-1) fs64_to_cpu(struct qnx6_sb_info *sbi, __fs64 n)
75139 {
75140 if (sbi->s_bytesex == BYTESEX_LE)
75141 return le64_to_cpu((__force __le64)n);
75142@@ -90,7 +90,7 @@ static inline __fs64 cpu_to_fs64(struct qnx6_sb_info *sbi, __u64 n)
75143 return (__force __fs64)cpu_to_be64(n);
75144 }
75145
75146-static inline __u32 fs32_to_cpu(struct qnx6_sb_info *sbi, __fs32 n)
75147+static inline __u32 __intentional_overflow(-1) fs32_to_cpu(struct qnx6_sb_info *sbi, __fs32 n)
75148 {
75149 if (sbi->s_bytesex == BYTESEX_LE)
75150 return le32_to_cpu((__force __le32)n);
75151diff --git a/fs/quota/netlink.c b/fs/quota/netlink.c
75152index bb2869f..d34ada8 100644
75153--- a/fs/quota/netlink.c
75154+++ b/fs/quota/netlink.c
75155@@ -44,7 +44,7 @@ static struct genl_family quota_genl_family = {
75156 void quota_send_warning(struct kqid qid, dev_t dev,
75157 const char warntype)
75158 {
75159- static atomic_t seq;
75160+ static atomic_unchecked_t seq;
75161 struct sk_buff *skb;
75162 void *msg_head;
75163 int ret;
75164@@ -60,7 +60,7 @@ void quota_send_warning(struct kqid qid, dev_t dev,
75165 "VFS: Not enough memory to send quota warning.\n");
75166 return;
75167 }
75168- msg_head = genlmsg_put(skb, 0, atomic_add_return(1, &seq),
75169+ msg_head = genlmsg_put(skb, 0, atomic_add_return_unchecked(1, &seq),
75170 &quota_genl_family, 0, QUOTA_NL_C_WARNING);
75171 if (!msg_head) {
75172 printk(KERN_ERR
75173diff --git a/fs/read_write.c b/fs/read_write.c
75174index 8e1b687..bad2eec 100644
75175--- a/fs/read_write.c
75176+++ b/fs/read_write.c
75177@@ -553,7 +553,7 @@ ssize_t __kernel_write(struct file *file, const char *buf, size_t count, loff_t
75178
75179 old_fs = get_fs();
75180 set_fs(get_ds());
75181- p = (__force const char __user *)buf;
75182+ p = (const char __force_user *)buf;
75183 if (count > MAX_RW_COUNT)
75184 count = MAX_RW_COUNT;
75185 if (file->f_op->write)
75186diff --git a/fs/readdir.c b/fs/readdir.c
75187index ced6791..936687b 100644
75188--- a/fs/readdir.c
75189+++ b/fs/readdir.c
75190@@ -18,6 +18,7 @@
75191 #include <linux/security.h>
75192 #include <linux/syscalls.h>
75193 #include <linux/unistd.h>
75194+#include <linux/namei.h>
75195
75196 #include <asm/uaccess.h>
75197
75198@@ -71,6 +72,7 @@ struct old_linux_dirent {
75199 struct readdir_callback {
75200 struct dir_context ctx;
75201 struct old_linux_dirent __user * dirent;
75202+ struct file * file;
75203 int result;
75204 };
75205
75206@@ -89,6 +91,10 @@ static int fillonedir(struct dir_context *ctx, const char *name, int namlen,
75207 buf->result = -EOVERFLOW;
75208 return -EOVERFLOW;
75209 }
75210+
75211+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
75212+ return 0;
75213+
75214 buf->result++;
75215 dirent = buf->dirent;
75216 if (!access_ok(VERIFY_WRITE, dirent,
75217@@ -120,6 +126,7 @@ SYSCALL_DEFINE3(old_readdir, unsigned int, fd,
75218 if (!f.file)
75219 return -EBADF;
75220
75221+ buf.file = f.file;
75222 error = iterate_dir(f.file, &buf.ctx);
75223 if (buf.result)
75224 error = buf.result;
75225@@ -145,6 +152,7 @@ struct getdents_callback {
75226 struct dir_context ctx;
75227 struct linux_dirent __user * current_dir;
75228 struct linux_dirent __user * previous;
75229+ struct file * file;
75230 int count;
75231 int error;
75232 };
75233@@ -167,6 +175,10 @@ static int filldir(struct dir_context *ctx, const char *name, int namlen,
75234 buf->error = -EOVERFLOW;
75235 return -EOVERFLOW;
75236 }
75237+
75238+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
75239+ return 0;
75240+
75241 dirent = buf->previous;
75242 if (dirent) {
75243 if (__put_user(offset, &dirent->d_off))
75244@@ -212,6 +224,7 @@ SYSCALL_DEFINE3(getdents, unsigned int, fd,
75245 if (!f.file)
75246 return -EBADF;
75247
75248+ buf.file = f.file;
75249 error = iterate_dir(f.file, &buf.ctx);
75250 if (error >= 0)
75251 error = buf.error;
75252@@ -230,6 +243,7 @@ struct getdents_callback64 {
75253 struct dir_context ctx;
75254 struct linux_dirent64 __user * current_dir;
75255 struct linux_dirent64 __user * previous;
75256+ struct file *file;
75257 int count;
75258 int error;
75259 };
75260@@ -246,6 +260,10 @@ static int filldir64(struct dir_context *ctx, const char *name, int namlen,
75261 buf->error = -EINVAL; /* only used if we fail.. */
75262 if (reclen > buf->count)
75263 return -EINVAL;
75264+
75265+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
75266+ return 0;
75267+
75268 dirent = buf->previous;
75269 if (dirent) {
75270 if (__put_user(offset, &dirent->d_off))
75271@@ -293,6 +311,7 @@ SYSCALL_DEFINE3(getdents64, unsigned int, fd,
75272 if (!f.file)
75273 return -EBADF;
75274
75275+ buf.file = f.file;
75276 error = iterate_dir(f.file, &buf.ctx);
75277 if (error >= 0)
75278 error = buf.error;
75279diff --git a/fs/reiserfs/do_balan.c b/fs/reiserfs/do_balan.c
75280index 9c02d96..6562c10 100644
75281--- a/fs/reiserfs/do_balan.c
75282+++ b/fs/reiserfs/do_balan.c
75283@@ -1887,7 +1887,7 @@ void do_balance(struct tree_balance *tb, struct item_head *ih,
75284 return;
75285 }
75286
75287- atomic_inc(&fs_generation(tb->tb_sb));
75288+ atomic_inc_unchecked(&fs_generation(tb->tb_sb));
75289 do_balance_starts(tb);
75290
75291 /*
75292diff --git a/fs/reiserfs/item_ops.c b/fs/reiserfs/item_ops.c
75293index aca73dd..e3c558d 100644
75294--- a/fs/reiserfs/item_ops.c
75295+++ b/fs/reiserfs/item_ops.c
75296@@ -724,18 +724,18 @@ static void errcatch_print_vi(struct virtual_item *vi)
75297 }
75298
75299 static struct item_operations errcatch_ops = {
75300- errcatch_bytes_number,
75301- errcatch_decrement_key,
75302- errcatch_is_left_mergeable,
75303- errcatch_print_item,
75304- errcatch_check_item,
75305+ .bytes_number = errcatch_bytes_number,
75306+ .decrement_key = errcatch_decrement_key,
75307+ .is_left_mergeable = errcatch_is_left_mergeable,
75308+ .print_item = errcatch_print_item,
75309+ .check_item = errcatch_check_item,
75310
75311- errcatch_create_vi,
75312- errcatch_check_left,
75313- errcatch_check_right,
75314- errcatch_part_size,
75315- errcatch_unit_num,
75316- errcatch_print_vi
75317+ .create_vi = errcatch_create_vi,
75318+ .check_left = errcatch_check_left,
75319+ .check_right = errcatch_check_right,
75320+ .part_size = errcatch_part_size,
75321+ .unit_num = errcatch_unit_num,
75322+ .print_vi = errcatch_print_vi
75323 };
75324
75325 #if ! (TYPE_STAT_DATA == 0 && TYPE_INDIRECT == 1 && TYPE_DIRECT == 2 && TYPE_DIRENTRY == 3)
75326diff --git a/fs/reiserfs/procfs.c b/fs/reiserfs/procfs.c
75327index 621b9f3..af527fd 100644
75328--- a/fs/reiserfs/procfs.c
75329+++ b/fs/reiserfs/procfs.c
75330@@ -114,7 +114,7 @@ static int show_super(struct seq_file *m, void *unused)
75331 "SMALL_TAILS " : "NO_TAILS ",
75332 replay_only(sb) ? "REPLAY_ONLY " : "",
75333 convert_reiserfs(sb) ? "CONV " : "",
75334- atomic_read(&r->s_generation_counter),
75335+ atomic_read_unchecked(&r->s_generation_counter),
75336 SF(s_disk_reads), SF(s_disk_writes), SF(s_fix_nodes),
75337 SF(s_do_balance), SF(s_unneeded_left_neighbor),
75338 SF(s_good_search_by_key_reada), SF(s_bmaps),
75339diff --git a/fs/reiserfs/reiserfs.h b/fs/reiserfs/reiserfs.h
75340index bb79cdd..fcf49ef 100644
75341--- a/fs/reiserfs/reiserfs.h
75342+++ b/fs/reiserfs/reiserfs.h
75343@@ -580,7 +580,7 @@ struct reiserfs_sb_info {
75344 /* Comment? -Hans */
75345 wait_queue_head_t s_wait;
75346 /* increased by one every time the tree gets re-balanced */
75347- atomic_t s_generation_counter;
75348+ atomic_unchecked_t s_generation_counter;
75349
75350 /* File system properties. Currently holds on-disk FS format */
75351 unsigned long s_properties;
75352@@ -2301,7 +2301,7 @@ static inline loff_t max_reiserfs_offset(struct inode *inode)
75353 #define REISERFS_USER_MEM 1 /* user memory mode */
75354
75355 #define fs_generation(s) (REISERFS_SB(s)->s_generation_counter)
75356-#define get_generation(s) atomic_read (&fs_generation(s))
75357+#define get_generation(s) atomic_read_unchecked (&fs_generation(s))
75358 #define FILESYSTEM_CHANGED_TB(tb) (get_generation((tb)->tb_sb) != (tb)->fs_gen)
75359 #define __fs_changed(gen,s) (gen != get_generation (s))
75360 #define fs_changed(gen,s) \
75361diff --git a/fs/reiserfs/super.c b/fs/reiserfs/super.c
75362index 71fbbe3..eff29ba 100644
75363--- a/fs/reiserfs/super.c
75364+++ b/fs/reiserfs/super.c
75365@@ -1868,6 +1868,10 @@ static int reiserfs_fill_super(struct super_block *s, void *data, int silent)
75366 sbi->s_mount_opt |= (1 << REISERFS_SMALLTAIL);
75367 sbi->s_mount_opt |= (1 << REISERFS_ERROR_RO);
75368 sbi->s_mount_opt |= (1 << REISERFS_BARRIER_FLUSH);
75369+#ifdef CONFIG_REISERFS_FS_XATTR
75370+ /* turn on user xattrs by default */
75371+ sbi->s_mount_opt |= (1 << REISERFS_XATTRS_USER);
75372+#endif
75373 /* no preallocation minimum, be smart in reiserfs_file_write instead */
75374 sbi->s_alloc_options.preallocmin = 0;
75375 /* Preallocate by 16 blocks (17-1) at once */
75376diff --git a/fs/select.c b/fs/select.c
75377index f684c75..4117611 100644
75378--- a/fs/select.c
75379+++ b/fs/select.c
75380@@ -20,6 +20,7 @@
75381 #include <linux/export.h>
75382 #include <linux/slab.h>
75383 #include <linux/poll.h>
75384+#include <linux/security.h>
75385 #include <linux/personality.h> /* for STICKY_TIMEOUTS */
75386 #include <linux/file.h>
75387 #include <linux/fdtable.h>
75388@@ -880,6 +881,7 @@ int do_sys_poll(struct pollfd __user *ufds, unsigned int nfds,
75389 struct poll_list *walk = head;
75390 unsigned long todo = nfds;
75391
75392+ gr_learn_resource(current, RLIMIT_NOFILE, nfds, 1);
75393 if (nfds > rlimit(RLIMIT_NOFILE))
75394 return -EINVAL;
75395
75396diff --git a/fs/seq_file.c b/fs/seq_file.c
75397index 555f821..34684d7 100644
75398--- a/fs/seq_file.c
75399+++ b/fs/seq_file.c
75400@@ -12,6 +12,8 @@
75401 #include <linux/slab.h>
75402 #include <linux/cred.h>
75403 #include <linux/mm.h>
75404+#include <linux/sched.h>
75405+#include <linux/grsecurity.h>
75406
75407 #include <asm/uaccess.h>
75408 #include <asm/page.h>
75409@@ -23,16 +25,7 @@ static void seq_set_overflow(struct seq_file *m)
75410
75411 static void *seq_buf_alloc(unsigned long size)
75412 {
75413- void *buf;
75414-
75415- /*
75416- * __GFP_NORETRY to avoid oom-killings with high-order allocations -
75417- * it's better to fall back to vmalloc() than to kill things.
75418- */
75419- buf = kmalloc(size, GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN);
75420- if (!buf && size > PAGE_SIZE)
75421- buf = vmalloc(size);
75422- return buf;
75423+ return kmalloc(size, GFP_KERNEL | GFP_USERCOPY);
75424 }
75425
75426 /**
75427@@ -65,6 +58,9 @@ int seq_open(struct file *file, const struct seq_operations *op)
75428 #ifdef CONFIG_USER_NS
75429 p->user_ns = file->f_cred->user_ns;
75430 #endif
75431+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
75432+ p->exec_id = current->exec_id;
75433+#endif
75434
75435 /*
75436 * Wrappers around seq_open(e.g. swaps_open) need to be
75437@@ -87,6 +83,16 @@ int seq_open(struct file *file, const struct seq_operations *op)
75438 }
75439 EXPORT_SYMBOL(seq_open);
75440
75441+
75442+int seq_open_restrict(struct file *file, const struct seq_operations *op)
75443+{
75444+ if (gr_proc_is_restricted())
75445+ return -EACCES;
75446+
75447+ return seq_open(file, op);
75448+}
75449+EXPORT_SYMBOL(seq_open_restrict);
75450+
75451 static int traverse(struct seq_file *m, loff_t offset)
75452 {
75453 loff_t pos = 0, index;
75454@@ -158,7 +164,7 @@ Eoverflow:
75455 ssize_t seq_read(struct file *file, char __user *buf, size_t size, loff_t *ppos)
75456 {
75457 struct seq_file *m = file->private_data;
75458- size_t copied = 0;
75459+ ssize_t copied = 0;
75460 loff_t pos;
75461 size_t n;
75462 void *p;
75463@@ -557,7 +563,7 @@ static void single_stop(struct seq_file *p, void *v)
75464 int single_open(struct file *file, int (*show)(struct seq_file *, void *),
75465 void *data)
75466 {
75467- struct seq_operations *op = kmalloc(sizeof(*op), GFP_KERNEL);
75468+ seq_operations_no_const *op = kzalloc(sizeof(*op), GFP_KERNEL);
75469 int res = -ENOMEM;
75470
75471 if (op) {
75472@@ -593,6 +599,17 @@ int single_open_size(struct file *file, int (*show)(struct seq_file *, void *),
75473 }
75474 EXPORT_SYMBOL(single_open_size);
75475
75476+int single_open_restrict(struct file *file, int (*show)(struct seq_file *, void *),
75477+ void *data)
75478+{
75479+ if (gr_proc_is_restricted())
75480+ return -EACCES;
75481+
75482+ return single_open(file, show, data);
75483+}
75484+EXPORT_SYMBOL(single_open_restrict);
75485+
75486+
75487 int single_release(struct inode *inode, struct file *file)
75488 {
75489 const struct seq_operations *op = ((struct seq_file *)file->private_data)->op;
75490diff --git a/fs/splice.c b/fs/splice.c
75491index 7968da9..4ce985b 100644
75492--- a/fs/splice.c
75493+++ b/fs/splice.c
75494@@ -193,7 +193,7 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
75495 pipe_lock(pipe);
75496
75497 for (;;) {
75498- if (!pipe->readers) {
75499+ if (!atomic_read(&pipe->readers)) {
75500 send_sig(SIGPIPE, current, 0);
75501 if (!ret)
75502 ret = -EPIPE;
75503@@ -216,7 +216,7 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
75504 page_nr++;
75505 ret += buf->len;
75506
75507- if (pipe->files)
75508+ if (atomic_read(&pipe->files))
75509 do_wakeup = 1;
75510
75511 if (!--spd->nr_pages)
75512@@ -247,9 +247,9 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
75513 do_wakeup = 0;
75514 }
75515
75516- pipe->waiting_writers++;
75517+ atomic_inc(&pipe->waiting_writers);
75518 pipe_wait(pipe);
75519- pipe->waiting_writers--;
75520+ atomic_dec(&pipe->waiting_writers);
75521 }
75522
75523 pipe_unlock(pipe);
75524@@ -576,7 +576,7 @@ static ssize_t kernel_readv(struct file *file, const struct iovec *vec,
75525 old_fs = get_fs();
75526 set_fs(get_ds());
75527 /* The cast to a user pointer is valid due to the set_fs() */
75528- res = vfs_readv(file, (const struct iovec __user *)vec, vlen, &pos);
75529+ res = vfs_readv(file, (const struct iovec __force_user *)vec, vlen, &pos);
75530 set_fs(old_fs);
75531
75532 return res;
75533@@ -591,7 +591,7 @@ ssize_t kernel_write(struct file *file, const char *buf, size_t count,
75534 old_fs = get_fs();
75535 set_fs(get_ds());
75536 /* The cast to a user pointer is valid due to the set_fs() */
75537- res = vfs_write(file, (__force const char __user *)buf, count, &pos);
75538+ res = vfs_write(file, (const char __force_user *)buf, count, &pos);
75539 set_fs(old_fs);
75540
75541 return res;
75542@@ -644,7 +644,7 @@ ssize_t default_file_splice_read(struct file *in, loff_t *ppos,
75543 goto err;
75544
75545 this_len = min_t(size_t, len, PAGE_CACHE_SIZE - offset);
75546- vec[i].iov_base = (void __user *) page_address(page);
75547+ vec[i].iov_base = (void __force_user *) page_address(page);
75548 vec[i].iov_len = this_len;
75549 spd.pages[i] = page;
75550 spd.nr_pages++;
75551@@ -783,7 +783,7 @@ static int splice_from_pipe_feed(struct pipe_inode_info *pipe, struct splice_des
75552 ops->release(pipe, buf);
75553 pipe->curbuf = (pipe->curbuf + 1) & (pipe->buffers - 1);
75554 pipe->nrbufs--;
75555- if (pipe->files)
75556+ if (atomic_read(&pipe->files))
75557 sd->need_wakeup = true;
75558 }
75559
75560@@ -807,10 +807,10 @@ static int splice_from_pipe_feed(struct pipe_inode_info *pipe, struct splice_des
75561 static int splice_from_pipe_next(struct pipe_inode_info *pipe, struct splice_desc *sd)
75562 {
75563 while (!pipe->nrbufs) {
75564- if (!pipe->writers)
75565+ if (!atomic_read(&pipe->writers))
75566 return 0;
75567
75568- if (!pipe->waiting_writers && sd->num_spliced)
75569+ if (!atomic_read(&pipe->waiting_writers) && sd->num_spliced)
75570 return 0;
75571
75572 if (sd->flags & SPLICE_F_NONBLOCK)
75573@@ -1025,7 +1025,7 @@ iter_file_splice_write(struct pipe_inode_info *pipe, struct file *out,
75574 ops->release(pipe, buf);
75575 pipe->curbuf = (pipe->curbuf + 1) & (pipe->buffers - 1);
75576 pipe->nrbufs--;
75577- if (pipe->files)
75578+ if (atomic_read(&pipe->files))
75579 sd.need_wakeup = true;
75580 } else {
75581 buf->offset += ret;
75582@@ -1159,7 +1159,7 @@ ssize_t splice_direct_to_actor(struct file *in, struct splice_desc *sd,
75583 long ret, bytes;
75584 umode_t i_mode;
75585 size_t len;
75586- int i, flags;
75587+ int i, flags, more;
75588
75589 /*
75590 * We require the input being a regular file, as we don't want to
75591@@ -1185,7 +1185,7 @@ ssize_t splice_direct_to_actor(struct file *in, struct splice_desc *sd,
75592 * out of the pipe right after the splice_to_pipe(). So set
75593 * PIPE_READERS appropriately.
75594 */
75595- pipe->readers = 1;
75596+ atomic_set(&pipe->readers, 1);
75597
75598 current->splice_pipe = pipe;
75599 }
75600@@ -1202,6 +1202,7 @@ ssize_t splice_direct_to_actor(struct file *in, struct splice_desc *sd,
75601 * Don't block on output, we have to drain the direct pipe.
75602 */
75603 sd->flags &= ~SPLICE_F_NONBLOCK;
75604+ more = sd->flags & SPLICE_F_MORE;
75605
75606 while (len) {
75607 size_t read_len;
75608@@ -1215,6 +1216,15 @@ ssize_t splice_direct_to_actor(struct file *in, struct splice_desc *sd,
75609 sd->total_len = read_len;
75610
75611 /*
75612+ * If more data is pending, set SPLICE_F_MORE
75613+ * If this is the last data and SPLICE_F_MORE was not set
75614+ * initially, clears it.
75615+ */
75616+ if (read_len < len)
75617+ sd->flags |= SPLICE_F_MORE;
75618+ else if (!more)
75619+ sd->flags &= ~SPLICE_F_MORE;
75620+ /*
75621 * NOTE: nonblocking mode only applies to the input. We
75622 * must not do the output in nonblocking mode as then we
75623 * could get stuck data in the internal pipe:
75624@@ -1482,6 +1492,7 @@ static int get_iovec_page_array(const struct iovec __user *iov,
75625
75626 partial[buffers].offset = off;
75627 partial[buffers].len = plen;
75628+ partial[buffers].private = 0;
75629
75630 off = 0;
75631 len -= plen;
75632@@ -1718,9 +1729,9 @@ static int ipipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
75633 ret = -ERESTARTSYS;
75634 break;
75635 }
75636- if (!pipe->writers)
75637+ if (!atomic_read(&pipe->writers))
75638 break;
75639- if (!pipe->waiting_writers) {
75640+ if (!atomic_read(&pipe->waiting_writers)) {
75641 if (flags & SPLICE_F_NONBLOCK) {
75642 ret = -EAGAIN;
75643 break;
75644@@ -1752,7 +1763,7 @@ static int opipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
75645 pipe_lock(pipe);
75646
75647 while (pipe->nrbufs >= pipe->buffers) {
75648- if (!pipe->readers) {
75649+ if (!atomic_read(&pipe->readers)) {
75650 send_sig(SIGPIPE, current, 0);
75651 ret = -EPIPE;
75652 break;
75653@@ -1765,9 +1776,9 @@ static int opipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
75654 ret = -ERESTARTSYS;
75655 break;
75656 }
75657- pipe->waiting_writers++;
75658+ atomic_inc(&pipe->waiting_writers);
75659 pipe_wait(pipe);
75660- pipe->waiting_writers--;
75661+ atomic_dec(&pipe->waiting_writers);
75662 }
75663
75664 pipe_unlock(pipe);
75665@@ -1803,14 +1814,14 @@ retry:
75666 pipe_double_lock(ipipe, opipe);
75667
75668 do {
75669- if (!opipe->readers) {
75670+ if (!atomic_read(&opipe->readers)) {
75671 send_sig(SIGPIPE, current, 0);
75672 if (!ret)
75673 ret = -EPIPE;
75674 break;
75675 }
75676
75677- if (!ipipe->nrbufs && !ipipe->writers)
75678+ if (!ipipe->nrbufs && !atomic_read(&ipipe->writers))
75679 break;
75680
75681 /*
75682@@ -1907,7 +1918,7 @@ static int link_pipe(struct pipe_inode_info *ipipe,
75683 pipe_double_lock(ipipe, opipe);
75684
75685 do {
75686- if (!opipe->readers) {
75687+ if (!atomic_read(&opipe->readers)) {
75688 send_sig(SIGPIPE, current, 0);
75689 if (!ret)
75690 ret = -EPIPE;
75691@@ -1952,7 +1963,7 @@ static int link_pipe(struct pipe_inode_info *ipipe,
75692 * return EAGAIN if we have the potential of some data in the
75693 * future, otherwise just return 0
75694 */
75695- if (!ret && ipipe->waiting_writers && (flags & SPLICE_F_NONBLOCK))
75696+ if (!ret && atomic_read(&ipipe->waiting_writers) && (flags & SPLICE_F_NONBLOCK))
75697 ret = -EAGAIN;
75698
75699 pipe_unlock(ipipe);
75700diff --git a/fs/squashfs/xattr.c b/fs/squashfs/xattr.c
75701index 92fcde7..1687329 100644
75702--- a/fs/squashfs/xattr.c
75703+++ b/fs/squashfs/xattr.c
75704@@ -46,8 +46,8 @@ ssize_t squashfs_listxattr(struct dentry *d, char *buffer,
75705 + msblk->xattr_table;
75706 int offset = SQUASHFS_XATTR_OFFSET(squashfs_i(inode)->xattr);
75707 int count = squashfs_i(inode)->xattr_count;
75708- size_t rest = buffer_size;
75709- int err;
75710+ size_t used = 0;
75711+ ssize_t err;
75712
75713 /* check that the file system has xattrs */
75714 if (msblk->xattr_id_table == NULL)
75715@@ -68,11 +68,11 @@ ssize_t squashfs_listxattr(struct dentry *d, char *buffer,
75716 name_size = le16_to_cpu(entry.size);
75717 handler = squashfs_xattr_handler(le16_to_cpu(entry.type));
75718 if (handler)
75719- prefix_size = handler->list(d, buffer, rest, NULL,
75720+ prefix_size = handler->list(d, buffer, buffer ? buffer_size - used : 0, NULL,
75721 name_size, handler->flags);
75722 if (prefix_size) {
75723 if (buffer) {
75724- if (prefix_size + name_size + 1 > rest) {
75725+ if (prefix_size + name_size + 1 > buffer_size - used) {
75726 err = -ERANGE;
75727 goto failed;
75728 }
75729@@ -86,7 +86,7 @@ ssize_t squashfs_listxattr(struct dentry *d, char *buffer,
75730 buffer[name_size] = '\0';
75731 buffer += name_size + 1;
75732 }
75733- rest -= prefix_size + name_size + 1;
75734+ used += prefix_size + name_size + 1;
75735 } else {
75736 /* no handler or insuffficient privileges, so skip */
75737 err = squashfs_read_metadata(sb, NULL, &start,
75738@@ -107,7 +107,7 @@ ssize_t squashfs_listxattr(struct dentry *d, char *buffer,
75739 if (err < 0)
75740 goto failed;
75741 }
75742- err = buffer_size - rest;
75743+ err = used;
75744
75745 failed:
75746 return err;
75747diff --git a/fs/stat.c b/fs/stat.c
75748index ae0c3ce..9ee641c 100644
75749--- a/fs/stat.c
75750+++ b/fs/stat.c
75751@@ -28,8 +28,13 @@ void generic_fillattr(struct inode *inode, struct kstat *stat)
75752 stat->gid = inode->i_gid;
75753 stat->rdev = inode->i_rdev;
75754 stat->size = i_size_read(inode);
75755- stat->atime = inode->i_atime;
75756- stat->mtime = inode->i_mtime;
75757+ if (is_sidechannel_device(inode) && !capable_nolog(CAP_MKNOD)) {
75758+ stat->atime = inode->i_ctime;
75759+ stat->mtime = inode->i_ctime;
75760+ } else {
75761+ stat->atime = inode->i_atime;
75762+ stat->mtime = inode->i_mtime;
75763+ }
75764 stat->ctime = inode->i_ctime;
75765 stat->blksize = (1 << inode->i_blkbits);
75766 stat->blocks = inode->i_blocks;
75767@@ -52,9 +57,16 @@ EXPORT_SYMBOL(generic_fillattr);
75768 int vfs_getattr_nosec(struct path *path, struct kstat *stat)
75769 {
75770 struct inode *inode = path->dentry->d_inode;
75771+ int retval;
75772
75773- if (inode->i_op->getattr)
75774- return inode->i_op->getattr(path->mnt, path->dentry, stat);
75775+ if (inode->i_op->getattr) {
75776+ retval = inode->i_op->getattr(path->mnt, path->dentry, stat);
75777+ if (!retval && is_sidechannel_device(inode) && !capable_nolog(CAP_MKNOD)) {
75778+ stat->atime = stat->ctime;
75779+ stat->mtime = stat->ctime;
75780+ }
75781+ return retval;
75782+ }
75783
75784 generic_fillattr(inode, stat);
75785 return 0;
75786diff --git a/fs/sysfs/dir.c b/fs/sysfs/dir.c
75787index 0b45ff4..edf9d3a 100644
75788--- a/fs/sysfs/dir.c
75789+++ b/fs/sysfs/dir.c
75790@@ -33,6 +33,10 @@ void sysfs_warn_dup(struct kernfs_node *parent, const char *name)
75791 kfree(buf);
75792 }
75793
75794+#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
75795+extern int grsec_enable_sysfs_restrict;
75796+#endif
75797+
75798 /**
75799 * sysfs_create_dir_ns - create a directory for an object with a namespace tag
75800 * @kobj: object we're creating directory for
75801@@ -41,9 +45,16 @@ void sysfs_warn_dup(struct kernfs_node *parent, const char *name)
75802 int sysfs_create_dir_ns(struct kobject *kobj, const void *ns)
75803 {
75804 struct kernfs_node *parent, *kn;
75805+ const char *name;
75806+ umode_t mode = S_IRWXU | S_IRUGO | S_IXUGO;
75807+#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
75808+ const char *parent_name;
75809+#endif
75810
75811 BUG_ON(!kobj);
75812
75813+ name = kobject_name(kobj);
75814+
75815 if (kobj->parent)
75816 parent = kobj->parent->sd;
75817 else
75818@@ -52,11 +63,24 @@ int sysfs_create_dir_ns(struct kobject *kobj, const void *ns)
75819 if (!parent)
75820 return -ENOENT;
75821
75822- kn = kernfs_create_dir_ns(parent, kobject_name(kobj),
75823- S_IRWXU | S_IRUGO | S_IXUGO, kobj, ns);
75824+#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
75825+ parent_name = parent->name;
75826+ mode = S_IRWXU;
75827+
75828+ if ((!strcmp(parent_name, "") && (!strcmp(name, "devices") || !strcmp(name, "fs"))) ||
75829+ (!strcmp(parent_name, "devices") && !strcmp(name, "system")) ||
75830+ (!strcmp(parent_name, "fs") && (!strcmp(name, "selinux") || !strcmp(name, "fuse") || !strcmp(name, "ecryptfs"))) ||
75831+ (!strcmp(parent_name, "system") && !strcmp(name, "cpu")))
75832+ mode = S_IRWXU | S_IRUGO | S_IXUGO;
75833+ if (!grsec_enable_sysfs_restrict)
75834+ mode = S_IRWXU | S_IRUGO | S_IXUGO;
75835+#endif
75836+
75837+ kn = kernfs_create_dir_ns(parent, name,
75838+ mode, kobj, ns);
75839 if (IS_ERR(kn)) {
75840 if (PTR_ERR(kn) == -EEXIST)
75841- sysfs_warn_dup(parent, kobject_name(kobj));
75842+ sysfs_warn_dup(parent, name);
75843 return PTR_ERR(kn);
75844 }
75845
75846diff --git a/fs/sysv/sysv.h b/fs/sysv/sysv.h
75847index 69d4889..a810bd4 100644
75848--- a/fs/sysv/sysv.h
75849+++ b/fs/sysv/sysv.h
75850@@ -188,7 +188,7 @@ static inline u32 PDP_swab(u32 x)
75851 #endif
75852 }
75853
75854-static inline __u32 fs32_to_cpu(struct sysv_sb_info *sbi, __fs32 n)
75855+static inline __u32 __intentional_overflow(-1) fs32_to_cpu(struct sysv_sb_info *sbi, __fs32 n)
75856 {
75857 if (sbi->s_bytesex == BYTESEX_PDP)
75858 return PDP_swab((__force __u32)n);
75859diff --git a/fs/ubifs/io.c b/fs/ubifs/io.c
75860index fb08b0c..65fcc7e 100644
75861--- a/fs/ubifs/io.c
75862+++ b/fs/ubifs/io.c
75863@@ -155,7 +155,7 @@ int ubifs_leb_change(struct ubifs_info *c, int lnum, const void *buf, int len)
75864 return err;
75865 }
75866
75867-int ubifs_leb_unmap(struct ubifs_info *c, int lnum)
75868+int __intentional_overflow(-1) ubifs_leb_unmap(struct ubifs_info *c, int lnum)
75869 {
75870 int err;
75871
75872diff --git a/fs/udf/misc.c b/fs/udf/misc.c
75873index c175b4d..8f36a16 100644
75874--- a/fs/udf/misc.c
75875+++ b/fs/udf/misc.c
75876@@ -289,7 +289,7 @@ void udf_new_tag(char *data, uint16_t ident, uint16_t version, uint16_t snum,
75877
75878 u8 udf_tag_checksum(const struct tag *t)
75879 {
75880- u8 *data = (u8 *)t;
75881+ const u8 *data = (const u8 *)t;
75882 u8 checksum = 0;
75883 int i;
75884 for (i = 0; i < sizeof(struct tag); ++i)
75885diff --git a/fs/ufs/swab.h b/fs/ufs/swab.h
75886index 8d974c4..b82f6ec 100644
75887--- a/fs/ufs/swab.h
75888+++ b/fs/ufs/swab.h
75889@@ -22,7 +22,7 @@ enum {
75890 BYTESEX_BE
75891 };
75892
75893-static inline u64
75894+static inline u64 __intentional_overflow(-1)
75895 fs64_to_cpu(struct super_block *sbp, __fs64 n)
75896 {
75897 if (UFS_SB(sbp)->s_bytesex == BYTESEX_LE)
75898@@ -40,7 +40,7 @@ cpu_to_fs64(struct super_block *sbp, u64 n)
75899 return (__force __fs64)cpu_to_be64(n);
75900 }
75901
75902-static inline u32
75903+static inline u32 __intentional_overflow(-1)
75904 fs32_to_cpu(struct super_block *sbp, __fs32 n)
75905 {
75906 if (UFS_SB(sbp)->s_bytesex == BYTESEX_LE)
75907diff --git a/fs/utimes.c b/fs/utimes.c
75908index aa138d6..5f3a811 100644
75909--- a/fs/utimes.c
75910+++ b/fs/utimes.c
75911@@ -1,6 +1,7 @@
75912 #include <linux/compiler.h>
75913 #include <linux/file.h>
75914 #include <linux/fs.h>
75915+#include <linux/security.h>
75916 #include <linux/linkage.h>
75917 #include <linux/mount.h>
75918 #include <linux/namei.h>
75919@@ -103,6 +104,12 @@ static int utimes_common(struct path *path, struct timespec *times)
75920 }
75921 }
75922 retry_deleg:
75923+
75924+ if (!gr_acl_handle_utime(path->dentry, path->mnt)) {
75925+ error = -EACCES;
75926+ goto mnt_drop_write_and_out;
75927+ }
75928+
75929 mutex_lock(&inode->i_mutex);
75930 error = notify_change(path->dentry, &newattrs, &delegated_inode);
75931 mutex_unlock(&inode->i_mutex);
75932diff --git a/fs/xattr.c b/fs/xattr.c
75933index 4ef6985..a6cd6567 100644
75934--- a/fs/xattr.c
75935+++ b/fs/xattr.c
75936@@ -227,6 +227,27 @@ int vfs_xattr_cmp(struct dentry *dentry, const char *xattr_name,
75937 return rc;
75938 }
75939
75940+#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
75941+ssize_t
75942+pax_getxattr(struct dentry *dentry, void *value, size_t size)
75943+{
75944+ struct inode *inode = dentry->d_inode;
75945+ ssize_t error;
75946+
75947+ error = inode_permission(inode, MAY_EXEC);
75948+ if (error)
75949+ return error;
75950+
75951+ if (inode->i_op->getxattr)
75952+ error = inode->i_op->getxattr(dentry, XATTR_NAME_PAX_FLAGS, value, size);
75953+ else
75954+ error = -EOPNOTSUPP;
75955+
75956+ return error;
75957+}
75958+EXPORT_SYMBOL(pax_getxattr);
75959+#endif
75960+
75961 ssize_t
75962 vfs_getxattr(struct dentry *dentry, const char *name, void *value, size_t size)
75963 {
75964@@ -319,7 +340,7 @@ EXPORT_SYMBOL_GPL(vfs_removexattr);
75965 * Extended attribute SET operations
75966 */
75967 static long
75968-setxattr(struct dentry *d, const char __user *name, const void __user *value,
75969+setxattr(struct path *path, const char __user *name, const void __user *value,
75970 size_t size, int flags)
75971 {
75972 int error;
75973@@ -355,7 +376,12 @@ setxattr(struct dentry *d, const char __user *name, const void __user *value,
75974 posix_acl_fix_xattr_from_user(kvalue, size);
75975 }
75976
75977- error = vfs_setxattr(d, kname, kvalue, size, flags);
75978+ if (!gr_acl_handle_setxattr(path->dentry, path->mnt)) {
75979+ error = -EACCES;
75980+ goto out;
75981+ }
75982+
75983+ error = vfs_setxattr(path->dentry, kname, kvalue, size, flags);
75984 out:
75985 if (vvalue)
75986 vfree(vvalue);
75987@@ -376,7 +402,7 @@ retry:
75988 return error;
75989 error = mnt_want_write(path.mnt);
75990 if (!error) {
75991- error = setxattr(path.dentry, name, value, size, flags);
75992+ error = setxattr(&path, name, value, size, flags);
75993 mnt_drop_write(path.mnt);
75994 }
75995 path_put(&path);
75996@@ -412,7 +438,7 @@ SYSCALL_DEFINE5(fsetxattr, int, fd, const char __user *, name,
75997 audit_file(f.file);
75998 error = mnt_want_write_file(f.file);
75999 if (!error) {
76000- error = setxattr(f.file->f_path.dentry, name, value, size, flags);
76001+ error = setxattr(&f.file->f_path, name, value, size, flags);
76002 mnt_drop_write_file(f.file);
76003 }
76004 fdput(f);
76005@@ -598,7 +624,7 @@ SYSCALL_DEFINE3(flistxattr, int, fd, char __user *, list, size_t, size)
76006 * Extended attribute REMOVE operations
76007 */
76008 static long
76009-removexattr(struct dentry *d, const char __user *name)
76010+removexattr(struct path *path, const char __user *name)
76011 {
76012 int error;
76013 char kname[XATTR_NAME_MAX + 1];
76014@@ -609,7 +635,10 @@ removexattr(struct dentry *d, const char __user *name)
76015 if (error < 0)
76016 return error;
76017
76018- return vfs_removexattr(d, kname);
76019+ if (!gr_acl_handle_removexattr(path->dentry, path->mnt))
76020+ return -EACCES;
76021+
76022+ return vfs_removexattr(path->dentry, kname);
76023 }
76024
76025 static int path_removexattr(const char __user *pathname,
76026@@ -623,7 +652,7 @@ retry:
76027 return error;
76028 error = mnt_want_write(path.mnt);
76029 if (!error) {
76030- error = removexattr(path.dentry, name);
76031+ error = removexattr(&path, name);
76032 mnt_drop_write(path.mnt);
76033 }
76034 path_put(&path);
76035@@ -649,14 +678,16 @@ SYSCALL_DEFINE2(lremovexattr, const char __user *, pathname,
76036 SYSCALL_DEFINE2(fremovexattr, int, fd, const char __user *, name)
76037 {
76038 struct fd f = fdget(fd);
76039+ struct path *path;
76040 int error = -EBADF;
76041
76042 if (!f.file)
76043 return error;
76044+ path = &f.file->f_path;
76045 audit_file(f.file);
76046 error = mnt_want_write_file(f.file);
76047 if (!error) {
76048- error = removexattr(f.file->f_path.dentry, name);
76049+ error = removexattr(path, name);
76050 mnt_drop_write_file(f.file);
76051 }
76052 fdput(f);
76053diff --git a/fs/xfs/libxfs/xfs_bmap.c b/fs/xfs/libxfs/xfs_bmap.c
76054index 61ec015..7c18807 100644
76055--- a/fs/xfs/libxfs/xfs_bmap.c
76056+++ b/fs/xfs/libxfs/xfs_bmap.c
76057@@ -580,7 +580,7 @@ xfs_bmap_validate_ret(
76058
76059 #else
76060 #define xfs_bmap_check_leaf_extents(cur, ip, whichfork) do { } while (0)
76061-#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap)
76062+#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap) do { } while (0)
76063 #endif /* DEBUG */
76064
76065 /*
76066diff --git a/fs/xfs/xfs_dir2_readdir.c b/fs/xfs/xfs_dir2_readdir.c
76067index 098cd78..724d3f8 100644
76068--- a/fs/xfs/xfs_dir2_readdir.c
76069+++ b/fs/xfs/xfs_dir2_readdir.c
76070@@ -140,7 +140,12 @@ xfs_dir2_sf_getdents(
76071 ino = dp->d_ops->sf_get_ino(sfp, sfep);
76072 filetype = dp->d_ops->sf_get_ftype(sfep);
76073 ctx->pos = off & 0x7fffffff;
76074- if (!dir_emit(ctx, (char *)sfep->name, sfep->namelen, ino,
76075+ if (dp->i_df.if_u1.if_data == dp->i_df.if_u2.if_inline_data) {
76076+ char name[sfep->namelen];
76077+ memcpy(name, sfep->name, sfep->namelen);
76078+ if (!dir_emit(ctx, name, sfep->namelen, ino, xfs_dir3_get_dtype(dp->i_mount, filetype)))
76079+ return 0;
76080+ } else if (!dir_emit(ctx, (char *)sfep->name, sfep->namelen, ino,
76081 xfs_dir3_get_dtype(dp->i_mount, filetype)))
76082 return 0;
76083 sfep = dp->d_ops->sf_nextentry(sfp, sfep);
76084diff --git a/fs/xfs/xfs_ioctl.c b/fs/xfs/xfs_ioctl.c
76085index ac4feae..386d551 100644
76086--- a/fs/xfs/xfs_ioctl.c
76087+++ b/fs/xfs/xfs_ioctl.c
76088@@ -120,7 +120,7 @@ xfs_find_handle(
76089 }
76090
76091 error = -EFAULT;
76092- if (copy_to_user(hreq->ohandle, &handle, hsize) ||
76093+ if (hsize > sizeof handle || copy_to_user(hreq->ohandle, &handle, hsize) ||
76094 copy_to_user(hreq->ohandlen, &hsize, sizeof(__s32)))
76095 goto out_put;
76096
76097diff --git a/fs/xfs/xfs_linux.h b/fs/xfs/xfs_linux.h
76098index c31d2c2..6ec8f62 100644
76099--- a/fs/xfs/xfs_linux.h
76100+++ b/fs/xfs/xfs_linux.h
76101@@ -234,7 +234,7 @@ static inline kgid_t xfs_gid_to_kgid(__uint32_t gid)
76102 * of the compiler which do not like us using do_div in the middle
76103 * of large functions.
76104 */
76105-static inline __u32 xfs_do_div(void *a, __u32 b, int n)
76106+static inline __u32 __intentional_overflow(-1) xfs_do_div(void *a, __u32 b, int n)
76107 {
76108 __u32 mod;
76109
76110@@ -290,7 +290,7 @@ static inline __u32 xfs_do_mod(void *a, __u32 b, int n)
76111 return 0;
76112 }
76113 #else
76114-static inline __u32 xfs_do_div(void *a, __u32 b, int n)
76115+static inline __u32 __intentional_overflow(-1) xfs_do_div(void *a, __u32 b, int n)
76116 {
76117 __u32 mod;
76118
76119diff --git a/grsecurity/Kconfig b/grsecurity/Kconfig
76120new file mode 100644
76121index 0000000..31f8fe4
76122--- /dev/null
76123+++ b/grsecurity/Kconfig
76124@@ -0,0 +1,1182 @@
76125+#
76126+# grecurity configuration
76127+#
76128+menu "Memory Protections"
76129+depends on GRKERNSEC
76130+
76131+config GRKERNSEC_KMEM
76132+ bool "Deny reading/writing to /dev/kmem, /dev/mem, and /dev/port"
76133+ default y if GRKERNSEC_CONFIG_AUTO
76134+ select STRICT_DEVMEM if (X86 || ARM || TILE || S390)
76135+ help
76136+ If you say Y here, /dev/kmem and /dev/mem won't be allowed to
76137+ be written to or read from to modify or leak the contents of the running
76138+ kernel. /dev/port will also not be allowed to be opened, writing to
76139+ /dev/cpu/*/msr will be prevented, and support for kexec will be removed.
76140+ If you have module support disabled, enabling this will close up several
76141+ ways that are currently used to insert malicious code into the running
76142+ kernel.
76143+
76144+ Even with this feature enabled, we still highly recommend that
76145+ you use the RBAC system, as it is still possible for an attacker to
76146+ modify the running kernel through other more obscure methods.
76147+
76148+ It is highly recommended that you say Y here if you meet all the
76149+ conditions above.
76150+
76151+config GRKERNSEC_VM86
76152+ bool "Restrict VM86 mode"
76153+ default y if (GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_SERVER)
76154+ depends on X86_32
76155+
76156+ help
76157+ If you say Y here, only processes with CAP_SYS_RAWIO will be able to
76158+ make use of a special execution mode on 32bit x86 processors called
76159+ Virtual 8086 (VM86) mode. XFree86 may need vm86 mode for certain
76160+ video cards and will still work with this option enabled. The purpose
76161+ of the option is to prevent exploitation of emulation errors in
76162+ virtualization of vm86 mode like the one discovered in VMWare in 2009.
76163+ Nearly all users should be able to enable this option.
76164+
76165+config GRKERNSEC_IO
76166+ bool "Disable privileged I/O"
76167+ default y if (GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_SERVER)
76168+ depends on X86
76169+ select RTC_CLASS
76170+ select RTC_INTF_DEV
76171+ select RTC_DRV_CMOS
76172+
76173+ help
76174+ If you say Y here, all ioperm and iopl calls will return an error.
76175+ Ioperm and iopl can be used to modify the running kernel.
76176+ Unfortunately, some programs need this access to operate properly,
76177+ the most notable of which are XFree86 and hwclock. hwclock can be
76178+ remedied by having RTC support in the kernel, so real-time
76179+ clock support is enabled if this option is enabled, to ensure
76180+ that hwclock operates correctly. If hwclock still does not work,
76181+ either update udev or symlink /dev/rtc to /dev/rtc0.
76182+
76183+ If you're using XFree86 or a version of Xorg from 2012 or earlier,
76184+ you may not be able to boot into a graphical environment with this
76185+ option enabled. In this case, you should use the RBAC system instead.
76186+
76187+config GRKERNSEC_BPF_HARDEN
76188+ bool "Harden BPF interpreter"
76189+ default y if GRKERNSEC_CONFIG_AUTO
76190+ help
76191+ Unlike previous versions of grsecurity that hardened both the BPF
76192+ interpreted code against corruption at rest as well as the JIT code
76193+ against JIT-spray attacks and attacker-controlled immediate values
76194+ for ROP, this feature will enforce disabling of the new eBPF JIT engine
76195+ and will ensure the interpreted code is read-only at rest. This feature
76196+ may be removed at a later time when eBPF stabilizes to entirely revert
76197+ back to the more secure pre-3.16 BPF interpreter/JIT.
76198+
76199+ If you're using KERNEXEC, it's recommended that you enable this option
76200+ to supplement the hardening of the kernel.
76201+
76202+config GRKERNSEC_PERF_HARDEN
76203+ bool "Disable unprivileged PERF_EVENTS usage by default"
76204+ default y if GRKERNSEC_CONFIG_AUTO
76205+ depends on PERF_EVENTS
76206+ help
76207+ If you say Y here, the range of acceptable values for the
76208+ /proc/sys/kernel/perf_event_paranoid sysctl will be expanded to allow and
76209+ default to a new value: 3. When the sysctl is set to this value, no
76210+ unprivileged use of the PERF_EVENTS syscall interface will be permitted.
76211+
76212+ Though PERF_EVENTS can be used legitimately for performance monitoring
76213+ and low-level application profiling, it is forced on regardless of
76214+ configuration, has been at fault for several vulnerabilities, and
76215+ creates new opportunities for side channels and other information leaks.
76216+
76217+ This feature puts PERF_EVENTS into a secure default state and permits
76218+ the administrator to change out of it temporarily if unprivileged
76219+ application profiling is needed.
76220+
76221+config GRKERNSEC_RAND_THREADSTACK
76222+ bool "Insert random gaps between thread stacks"
76223+ default y if GRKERNSEC_CONFIG_AUTO
76224+ depends on PAX_RANDMMAP && !PPC
76225+ help
76226+ If you say Y here, a random-sized gap will be enforced between allocated
76227+ thread stacks. Glibc's NPTL and other threading libraries that
76228+ pass MAP_STACK to the kernel for thread stack allocation are supported.
76229+ The implementation currently provides 8 bits of entropy for the gap.
76230+
76231+ Many distributions do not compile threaded remote services with the
76232+ -fstack-check argument to GCC, causing the variable-sized stack-based
76233+ allocator, alloca(), to not probe the stack on allocation. This
76234+ permits an unbounded alloca() to skip over any guard page and potentially
76235+ modify another thread's stack reliably. An enforced random gap
76236+ reduces the reliability of such an attack and increases the chance
76237+ that such a read/write to another thread's stack instead lands in
76238+ an unmapped area, causing a crash and triggering grsecurity's
76239+ anti-bruteforcing logic.
76240+
76241+config GRKERNSEC_PROC_MEMMAP
76242+ bool "Harden ASLR against information leaks and entropy reduction"
76243+ default y if (GRKERNSEC_CONFIG_AUTO || PAX_NOEXEC || PAX_ASLR)
76244+ depends on PAX_NOEXEC || PAX_ASLR
76245+ help
76246+ If you say Y here, the /proc/<pid>/maps and /proc/<pid>/stat files will
76247+ give no information about the addresses of its mappings if
76248+ PaX features that rely on random addresses are enabled on the task.
76249+ In addition to sanitizing this information and disabling other
76250+ dangerous sources of information, this option causes reads of sensitive
76251+ /proc/<pid> entries where the file descriptor was opened in a different
76252+ task than the one performing the read. Such attempts are logged.
76253+ This option also limits argv/env strings for suid/sgid binaries
76254+ to 512KB to prevent a complete exhaustion of the stack entropy provided
76255+ by ASLR. Finally, it places an 8MB stack resource limit on suid/sgid
76256+ binaries to prevent alternative mmap layouts from being abused.
76257+
76258+ If you use PaX it is essential that you say Y here as it closes up
76259+ several holes that make full ASLR useless locally.
76260+
76261+
76262+config GRKERNSEC_KSTACKOVERFLOW
76263+ bool "Prevent kernel stack overflows"
76264+ default y if GRKERNSEC_CONFIG_AUTO
76265+ depends on !IA64 && 64BIT
76266+ help
76267+ If you say Y here, the kernel's process stacks will be allocated
76268+ with vmalloc instead of the kernel's default allocator. This
76269+ introduces guard pages that in combination with the alloca checking
76270+ of the STACKLEAK feature prevents all forms of kernel process stack
76271+ overflow abuse. Note that this is different from kernel stack
76272+ buffer overflows.
76273+
76274+config GRKERNSEC_BRUTE
76275+ bool "Deter exploit bruteforcing"
76276+ default y if GRKERNSEC_CONFIG_AUTO
76277+ help
76278+ If you say Y here, attempts to bruteforce exploits against forking
76279+ daemons such as apache or sshd, as well as against suid/sgid binaries
76280+ will be deterred. When a child of a forking daemon is killed by PaX
76281+ or crashes due to an illegal instruction or other suspicious signal,
76282+ the parent process will be delayed 30 seconds upon every subsequent
76283+ fork until the administrator is able to assess the situation and
76284+ restart the daemon.
76285+ In the suid/sgid case, the attempt is logged, the user has all their
76286+ existing instances of the suid/sgid binary terminated and will
76287+ be unable to execute any suid/sgid binaries for 15 minutes.
76288+
76289+ It is recommended that you also enable signal logging in the auditing
76290+ section so that logs are generated when a process triggers a suspicious
76291+ signal.
76292+ If the sysctl option is enabled, a sysctl option with name
76293+ "deter_bruteforce" is created.
76294+
76295+config GRKERNSEC_MODHARDEN
76296+ bool "Harden module auto-loading"
76297+ default y if GRKERNSEC_CONFIG_AUTO
76298+ depends on MODULES
76299+ help
76300+ If you say Y here, module auto-loading in response to use of some
76301+ feature implemented by an unloaded module will be restricted to
76302+ root users. Enabling this option helps defend against attacks
76303+ by unprivileged users who abuse the auto-loading behavior to
76304+ cause a vulnerable module to load that is then exploited.
76305+
76306+ If this option prevents a legitimate use of auto-loading for a
76307+ non-root user, the administrator can execute modprobe manually
76308+ with the exact name of the module mentioned in the alert log.
76309+ Alternatively, the administrator can add the module to the list
76310+ of modules loaded at boot by modifying init scripts.
76311+
76312+ Modification of init scripts will most likely be needed on
76313+ Ubuntu servers with encrypted home directory support enabled,
76314+ as the first non-root user logging in will cause the ecb(aes),
76315+ ecb(aes)-all, cbc(aes), and cbc(aes)-all modules to be loaded.
76316+
76317+config GRKERNSEC_HIDESYM
76318+ bool "Hide kernel symbols"
76319+ default y if GRKERNSEC_CONFIG_AUTO
76320+ select PAX_USERCOPY_SLABS
76321+ help
76322+ If you say Y here, getting information on loaded modules, and
76323+ displaying all kernel symbols through a syscall will be restricted
76324+ to users with CAP_SYS_MODULE. For software compatibility reasons,
76325+ /proc/kallsyms will be restricted to the root user. The RBAC
76326+ system can hide that entry even from root.
76327+
76328+ This option also prevents leaking of kernel addresses through
76329+ several /proc entries.
76330+
76331+ Note that this option is only effective provided the following
76332+ conditions are met:
76333+ 1) The kernel using grsecurity is not precompiled by some distribution
76334+ 2) You have also enabled GRKERNSEC_DMESG
76335+ 3) You are using the RBAC system and hiding other files such as your
76336+ kernel image and System.map. Alternatively, enabling this option
76337+ causes the permissions on /boot, /lib/modules, and the kernel
76338+ source directory to change at compile time to prevent
76339+ reading by non-root users.
76340+ If the above conditions are met, this option will aid in providing a
76341+ useful protection against local kernel exploitation of overflows
76342+ and arbitrary read/write vulnerabilities.
76343+
76344+ It is highly recommended that you enable GRKERNSEC_PERF_HARDEN
76345+ in addition to this feature.
76346+
76347+config GRKERNSEC_RANDSTRUCT
76348+ bool "Randomize layout of sensitive kernel structures"
76349+ default y if GRKERNSEC_CONFIG_AUTO
76350+ select GRKERNSEC_HIDESYM
76351+ select MODVERSIONS if MODULES
76352+ help
76353+ If you say Y here, the layouts of a number of sensitive kernel
76354+ structures (task, fs, cred, etc) and all structures composed entirely
76355+ of function pointers (aka "ops" structs) will be randomized at compile-time.
76356+ This can introduce the requirement of an additional infoleak
76357+ vulnerability for exploits targeting these structure types.
76358+
76359+ Enabling this feature will introduce some performance impact, slightly
76360+ increase memory usage, and prevent the use of forensic tools like
76361+ Volatility against the system (unless the kernel source tree isn't
76362+ cleaned after kernel installation).
76363+
76364+ The seed used for compilation is located at tools/gcc/randomize_layout_seed.h.
76365+ It remains after a make clean to allow for external modules to be compiled
76366+ with the existing seed and will be removed by a make mrproper or
76367+ make distclean.
76368+
76369+ Note that the implementation requires gcc 4.6.4. or newer. You may need
76370+ to install the supporting headers explicitly in addition to the normal
76371+ gcc package.
76372+
76373+config GRKERNSEC_RANDSTRUCT_PERFORMANCE
76374+ bool "Use cacheline-aware structure randomization"
76375+ depends on GRKERNSEC_RANDSTRUCT
76376+ default y if GRKERNSEC_CONFIG_PRIORITY_PERF
76377+ help
76378+ If you say Y here, the RANDSTRUCT randomization will make a best effort
76379+ at restricting randomization to cacheline-sized groups of elements. It
76380+ will further not randomize bitfields in structures. This reduces the
76381+ performance hit of RANDSTRUCT at the cost of weakened randomization.
76382+
76383+config GRKERNSEC_KERN_LOCKOUT
76384+ bool "Active kernel exploit response"
76385+ default y if GRKERNSEC_CONFIG_AUTO
76386+ depends on X86 || ARM || PPC || SPARC
76387+ help
76388+ If you say Y here, when a PaX alert is triggered due to suspicious
76389+ activity in the kernel (from KERNEXEC/UDEREF/USERCOPY)
76390+ or an OOPS occurs due to bad memory accesses, instead of just
76391+ terminating the offending process (and potentially allowing
76392+ a subsequent exploit from the same user), we will take one of two
76393+ actions:
76394+ If the user was root, we will panic the system
76395+ If the user was non-root, we will log the attempt, terminate
76396+ all processes owned by the user, then prevent them from creating
76397+ any new processes until the system is restarted
76398+ This deters repeated kernel exploitation/bruteforcing attempts
76399+ and is useful for later forensics.
76400+
76401+config GRKERNSEC_OLD_ARM_USERLAND
76402+ bool "Old ARM userland compatibility"
76403+ depends on ARM && (CPU_V6 || CPU_V6K || CPU_V7)
76404+ help
76405+ If you say Y here, stubs of executable code to perform such operations
76406+ as "compare-exchange" will be placed at fixed locations in the ARM vector
76407+ table. This is unfortunately needed for old ARM userland meant to run
76408+ across a wide range of processors. Without this option enabled,
76409+ the get_tls and data memory barrier stubs will be emulated by the kernel,
76410+ which is enough for Linaro userlands or other userlands designed for v6
76411+ and newer ARM CPUs. It's recommended that you try without this option enabled
76412+ first, and only enable it if your userland does not boot (it will likely fail
76413+ at init time).
76414+
76415+endmenu
76416+menu "Role Based Access Control Options"
76417+depends on GRKERNSEC
76418+
76419+config GRKERNSEC_RBAC_DEBUG
76420+ bool
76421+
76422+config GRKERNSEC_NO_RBAC
76423+ bool "Disable RBAC system"
76424+ help
76425+ If you say Y here, the /dev/grsec device will be removed from the kernel,
76426+ preventing the RBAC system from being enabled. You should only say Y
76427+ here if you have no intention of using the RBAC system, so as to prevent
76428+ an attacker with root access from misusing the RBAC system to hide files
76429+ and processes when loadable module support and /dev/[k]mem have been
76430+ locked down.
76431+
76432+config GRKERNSEC_ACL_HIDEKERN
76433+ bool "Hide kernel processes"
76434+ help
76435+ If you say Y here, all kernel threads will be hidden to all
76436+ processes but those whose subject has the "view hidden processes"
76437+ flag.
76438+
76439+config GRKERNSEC_ACL_MAXTRIES
76440+ int "Maximum tries before password lockout"
76441+ default 3
76442+ help
76443+ This option enforces the maximum number of times a user can attempt
76444+ to authorize themselves with the grsecurity RBAC system before being
76445+ denied the ability to attempt authorization again for a specified time.
76446+ The lower the number, the harder it will be to brute-force a password.
76447+
76448+config GRKERNSEC_ACL_TIMEOUT
76449+ int "Time to wait after max password tries, in seconds"
76450+ default 30
76451+ help
76452+ This option specifies the time the user must wait after attempting to
76453+ authorize to the RBAC system with the maximum number of invalid
76454+ passwords. The higher the number, the harder it will be to brute-force
76455+ a password.
76456+
76457+endmenu
76458+menu "Filesystem Protections"
76459+depends on GRKERNSEC
76460+
76461+config GRKERNSEC_PROC
76462+ bool "Proc restrictions"
76463+ default y if GRKERNSEC_CONFIG_AUTO
76464+ help
76465+ If you say Y here, the permissions of the /proc filesystem
76466+ will be altered to enhance system security and privacy. You MUST
76467+ choose either a user only restriction or a user and group restriction.
76468+ Depending upon the option you choose, you can either restrict users to
76469+ see only the processes they themselves run, or choose a group that can
76470+ view all processes and files normally restricted to root if you choose
76471+ the "restrict to user only" option. NOTE: If you're running identd or
76472+ ntpd as a non-root user, you will have to run it as the group you
76473+ specify here.
76474+
76475+config GRKERNSEC_PROC_USER
76476+ bool "Restrict /proc to user only"
76477+ depends on GRKERNSEC_PROC
76478+ help
76479+ If you say Y here, non-root users will only be able to view their own
76480+ processes, and restricts them from viewing network-related information,
76481+ and viewing kernel symbol and module information.
76482+
76483+config GRKERNSEC_PROC_USERGROUP
76484+ bool "Allow special group"
76485+ default y if GRKERNSEC_CONFIG_AUTO
76486+ depends on GRKERNSEC_PROC && !GRKERNSEC_PROC_USER
76487+ help
76488+ If you say Y here, you will be able to select a group that will be
76489+ able to view all processes and network-related information. If you've
76490+ enabled GRKERNSEC_HIDESYM, kernel and symbol information may still
76491+ remain hidden. This option is useful if you want to run identd as
76492+ a non-root user. The group you select may also be chosen at boot time
76493+ via "grsec_proc_gid=" on the kernel commandline.
76494+
76495+config GRKERNSEC_PROC_GID
76496+ int "GID for special group"
76497+ depends on GRKERNSEC_PROC_USERGROUP
76498+ default 1001
76499+
76500+config GRKERNSEC_PROC_ADD
76501+ bool "Additional restrictions"
76502+ default y if GRKERNSEC_CONFIG_AUTO
76503+ depends on GRKERNSEC_PROC_USER || GRKERNSEC_PROC_USERGROUP
76504+ help
76505+ If you say Y here, additional restrictions will be placed on
76506+ /proc that keep normal users from viewing device information and
76507+ slabinfo information that could be useful for exploits.
76508+
76509+config GRKERNSEC_LINK
76510+ bool "Linking restrictions"
76511+ default y if GRKERNSEC_CONFIG_AUTO
76512+ help
76513+ If you say Y here, /tmp race exploits will be prevented, since users
76514+ will no longer be able to follow symlinks owned by other users in
76515+ world-writable +t directories (e.g. /tmp), unless the owner of the
76516+ symlink is the owner of the directory. users will also not be
76517+ able to hardlink to files they do not own. If the sysctl option is
76518+ enabled, a sysctl option with name "linking_restrictions" is created.
76519+
76520+config GRKERNSEC_SYMLINKOWN
76521+ bool "Kernel-enforced SymlinksIfOwnerMatch"
76522+ default y if GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_SERVER
76523+ help
76524+ Apache's SymlinksIfOwnerMatch option has an inherent race condition
76525+ that prevents it from being used as a security feature. As Apache
76526+ verifies the symlink by performing a stat() against the target of
76527+ the symlink before it is followed, an attacker can setup a symlink
76528+ to point to a same-owned file, then replace the symlink with one
76529+ that targets another user's file just after Apache "validates" the
76530+ symlink -- a classic TOCTOU race. If you say Y here, a complete,
76531+ race-free replacement for Apache's "SymlinksIfOwnerMatch" option
76532+ will be in place for the group you specify. If the sysctl option
76533+ is enabled, a sysctl option with name "enforce_symlinksifowner" is
76534+ created.
76535+
76536+config GRKERNSEC_SYMLINKOWN_GID
76537+ int "GID for users with kernel-enforced SymlinksIfOwnerMatch"
76538+ depends on GRKERNSEC_SYMLINKOWN
76539+ default 1006
76540+ help
76541+ Setting this GID determines what group kernel-enforced
76542+ SymlinksIfOwnerMatch will be enabled for. If the sysctl option
76543+ is enabled, a sysctl option with name "symlinkown_gid" is created.
76544+
76545+config GRKERNSEC_FIFO
76546+ bool "FIFO restrictions"
76547+ default y if GRKERNSEC_CONFIG_AUTO
76548+ help
76549+ If you say Y here, users will not be able to write to FIFOs they don't
76550+ own in world-writable +t directories (e.g. /tmp), unless the owner of
76551+ the FIFO is the same owner of the directory it's held in. If the sysctl
76552+ option is enabled, a sysctl option with name "fifo_restrictions" is
76553+ created.
76554+
76555+config GRKERNSEC_SYSFS_RESTRICT
76556+ bool "Sysfs/debugfs restriction"
76557+ default y if (GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_SERVER)
76558+ depends on SYSFS
76559+ help
76560+ If you say Y here, sysfs (the pseudo-filesystem mounted at /sys) and
76561+ any filesystem normally mounted under it (e.g. debugfs) will be
76562+ mostly accessible only by root. These filesystems generally provide access
76563+ to hardware and debug information that isn't appropriate for unprivileged
76564+ users of the system. Sysfs and debugfs have also become a large source
76565+ of new vulnerabilities, ranging from infoleaks to local compromise.
76566+ There has been very little oversight with an eye toward security involved
76567+ in adding new exporters of information to these filesystems, so their
76568+ use is discouraged.
76569+ For reasons of compatibility, a few directories have been whitelisted
76570+ for access by non-root users:
76571+ /sys/fs/selinux
76572+ /sys/fs/fuse
76573+ /sys/devices/system/cpu
76574+
76575+config GRKERNSEC_ROFS
76576+ bool "Runtime read-only mount protection"
76577+ depends on SYSCTL
76578+ help
76579+ If you say Y here, a sysctl option with name "romount_protect" will
76580+ be created. By setting this option to 1 at runtime, filesystems
76581+ will be protected in the following ways:
76582+ * No new writable mounts will be allowed
76583+ * Existing read-only mounts won't be able to be remounted read/write
76584+ * Write operations will be denied on all block devices
76585+ This option acts independently of grsec_lock: once it is set to 1,
76586+ it cannot be turned off. Therefore, please be mindful of the resulting
76587+ behavior if this option is enabled in an init script on a read-only
76588+ filesystem.
76589+ Also be aware that as with other root-focused features, GRKERNSEC_KMEM
76590+ and GRKERNSEC_IO should be enabled and module loading disabled via
76591+ config or at runtime.
76592+ This feature is mainly intended for secure embedded systems.
76593+
76594+
76595+config GRKERNSEC_DEVICE_SIDECHANNEL
76596+ bool "Eliminate stat/notify-based device sidechannels"
76597+ default y if GRKERNSEC_CONFIG_AUTO
76598+ help
76599+ If you say Y here, timing analyses on block or character
76600+ devices like /dev/ptmx using stat or inotify/dnotify/fanotify
76601+ will be thwarted for unprivileged users. If a process without
76602+ CAP_MKNOD stats such a device, the last access and last modify times
76603+ will match the device's create time. No access or modify events
76604+ will be triggered through inotify/dnotify/fanotify for such devices.
76605+ This feature will prevent attacks that may at a minimum
76606+ allow an attacker to determine the administrator's password length.
76607+
76608+config GRKERNSEC_CHROOT
76609+ bool "Chroot jail restrictions"
76610+ default y if GRKERNSEC_CONFIG_AUTO
76611+ help
76612+ If you say Y here, you will be able to choose several options that will
76613+ make breaking out of a chrooted jail much more difficult. If you
76614+ encounter no software incompatibilities with the following options, it
76615+ is recommended that you enable each one.
76616+
76617+ Note that the chroot restrictions are not intended to apply to "chroots"
76618+ to directories that are simple bind mounts of the global root filesystem.
76619+ For several other reasons, a user shouldn't expect any significant
76620+ security by performing such a chroot.
76621+
76622+config GRKERNSEC_CHROOT_MOUNT
76623+ bool "Deny mounts"
76624+ default y if GRKERNSEC_CONFIG_AUTO
76625+ depends on GRKERNSEC_CHROOT
76626+ help
76627+ If you say Y here, processes inside a chroot will not be able to
76628+ mount or remount filesystems. If the sysctl option is enabled, a
76629+ sysctl option with name "chroot_deny_mount" is created.
76630+
76631+config GRKERNSEC_CHROOT_DOUBLE
76632+ bool "Deny double-chroots"
76633+ default y if GRKERNSEC_CONFIG_AUTO
76634+ depends on GRKERNSEC_CHROOT
76635+ help
76636+ If you say Y here, processes inside a chroot will not be able to chroot
76637+ again outside the chroot. This is a widely used method of breaking
76638+ out of a chroot jail and should not be allowed. If the sysctl
76639+ option is enabled, a sysctl option with name
76640+ "chroot_deny_chroot" is created.
76641+
76642+config GRKERNSEC_CHROOT_PIVOT
76643+ bool "Deny pivot_root in chroot"
76644+ default y if GRKERNSEC_CONFIG_AUTO
76645+ depends on GRKERNSEC_CHROOT
76646+ help
76647+ If you say Y here, processes inside a chroot will not be able to use
76648+ a function called pivot_root() that was introduced in Linux 2.3.41. It
76649+ works similarly to chroot in that it changes the root filesystem. This
76650+ function could be misused in a chrooted process to attempt to break out
76651+ of the chroot, and therefore should not be allowed. If the sysctl
76652+ option is enabled, a sysctl option with name "chroot_deny_pivot" is
76653+ created.
76654+
76655+config GRKERNSEC_CHROOT_CHDIR
76656+ bool "Enforce chdir(\"/\") on all chroots"
76657+ default y if GRKERNSEC_CONFIG_AUTO
76658+ depends on GRKERNSEC_CHROOT
76659+ help
76660+ If you say Y here, the current working directory of all newly-chrooted
76661+ applications will be set to the root directory of the chroot.
76662+ The man page on chroot(2) states:
76663+ Note that this call does not change the current working
76664+ directory, so that `.' can be outside the tree rooted at
76665+ `/'. In particular, the super-user can escape from a
76666+ `chroot jail' by doing `mkdir foo; chroot foo; cd ..'.
76667+
76668+ It is recommended that you say Y here, since it's not known to break
76669+ any software. If the sysctl option is enabled, a sysctl option with
76670+ name "chroot_enforce_chdir" is created.
76671+
76672+config GRKERNSEC_CHROOT_CHMOD
76673+ bool "Deny (f)chmod +s"
76674+ default y if GRKERNSEC_CONFIG_AUTO
76675+ depends on GRKERNSEC_CHROOT
76676+ help
76677+ If you say Y here, processes inside a chroot will not be able to chmod
76678+ or fchmod files to make them have suid or sgid bits. This protects
76679+ against another published method of breaking a chroot. If the sysctl
76680+ option is enabled, a sysctl option with name "chroot_deny_chmod" is
76681+ created.
76682+
76683+config GRKERNSEC_CHROOT_FCHDIR
76684+ bool "Deny fchdir and fhandle out of chroot"
76685+ default y if GRKERNSEC_CONFIG_AUTO
76686+ depends on GRKERNSEC_CHROOT
76687+ help
76688+ If you say Y here, a well-known method of breaking chroots by fchdir'ing
76689+ to a file descriptor of the chrooting process that points to a directory
76690+ outside the filesystem will be stopped. Additionally, this option prevents
76691+ use of the recently-created syscall for opening files by a guessable "file
76692+ handle" inside a chroot. If the sysctl option is enabled, a sysctl option
76693+ with name "chroot_deny_fchdir" is created.
76694+
76695+config GRKERNSEC_CHROOT_MKNOD
76696+ bool "Deny mknod"
76697+ default y if GRKERNSEC_CONFIG_AUTO
76698+ depends on GRKERNSEC_CHROOT
76699+ help
76700+ If you say Y here, processes inside a chroot will not be allowed to
76701+ mknod. The problem with using mknod inside a chroot is that it
76702+ would allow an attacker to create a device entry that is the same
76703+ as one on the physical root of your system, which could range from
76704+ anything from the console device to a device for your harddrive (which
76705+ they could then use to wipe the drive or steal data). It is recommended
76706+ that you say Y here, unless you run into software incompatibilities.
76707+ If the sysctl option is enabled, a sysctl option with name
76708+ "chroot_deny_mknod" is created.
76709+
76710+config GRKERNSEC_CHROOT_SHMAT
76711+ bool "Deny shmat() out of chroot"
76712+ default y if GRKERNSEC_CONFIG_AUTO
76713+ depends on GRKERNSEC_CHROOT
76714+ help
76715+ If you say Y here, processes inside a chroot will not be able to attach
76716+ to shared memory segments that were created outside of the chroot jail.
76717+ It is recommended that you say Y here. If the sysctl option is enabled,
76718+ a sysctl option with name "chroot_deny_shmat" is created.
76719+
76720+config GRKERNSEC_CHROOT_UNIX
76721+ bool "Deny access to abstract AF_UNIX sockets out of chroot"
76722+ default y if GRKERNSEC_CONFIG_AUTO
76723+ depends on GRKERNSEC_CHROOT
76724+ help
76725+ If you say Y here, processes inside a chroot will not be able to
76726+ connect to abstract (meaning not belonging to a filesystem) Unix
76727+ domain sockets that were bound outside of a chroot. It is recommended
76728+ that you say Y here. If the sysctl option is enabled, a sysctl option
76729+ with name "chroot_deny_unix" is created.
76730+
76731+config GRKERNSEC_CHROOT_FINDTASK
76732+ bool "Protect outside processes"
76733+ default y if GRKERNSEC_CONFIG_AUTO
76734+ depends on GRKERNSEC_CHROOT
76735+ help
76736+ If you say Y here, processes inside a chroot will not be able to
76737+ kill, send signals with fcntl, ptrace, capget, getpgid, setpgid,
76738+ getsid, or view any process outside of the chroot. If the sysctl
76739+ option is enabled, a sysctl option with name "chroot_findtask" is
76740+ created.
76741+
76742+config GRKERNSEC_CHROOT_NICE
76743+ bool "Restrict priority changes"
76744+ default y if GRKERNSEC_CONFIG_AUTO
76745+ depends on GRKERNSEC_CHROOT
76746+ help
76747+ If you say Y here, processes inside a chroot will not be able to raise
76748+ the priority of processes in the chroot, or alter the priority of
76749+ processes outside the chroot. This provides more security than simply
76750+ removing CAP_SYS_NICE from the process' capability set. If the
76751+ sysctl option is enabled, a sysctl option with name "chroot_restrict_nice"
76752+ is created.
76753+
76754+config GRKERNSEC_CHROOT_SYSCTL
76755+ bool "Deny sysctl writes"
76756+ default y if GRKERNSEC_CONFIG_AUTO
76757+ depends on GRKERNSEC_CHROOT
76758+ help
76759+ If you say Y here, an attacker in a chroot will not be able to
76760+ write to sysctl entries, either by sysctl(2) or through a /proc
76761+ interface. It is strongly recommended that you say Y here. If the
76762+ sysctl option is enabled, a sysctl option with name
76763+ "chroot_deny_sysctl" is created.
76764+
76765+config GRKERNSEC_CHROOT_RENAME
76766+ bool "Deny bad renames"
76767+ default y if GRKERNSEC_CONFIG_AUTO
76768+ depends on GRKERNSEC_CHROOT
76769+ help
76770+ If you say Y here, an attacker in a chroot will not be able to
76771+ abuse the ability to create double chroots to break out of the
76772+ chroot by exploiting a race condition between a rename of a directory
76773+ within a chroot against an open of a symlink with relative path
76774+ components. This feature will likewise prevent an accomplice outside
76775+ a chroot from enabling a user inside the chroot to break out and make
76776+ use of their credentials on the global filesystem. Enabling this
76777+ feature is essential to prevent root users from breaking out of a
76778+ chroot. If the sysctl option is enabled, a sysctl option with name
76779+ "chroot_deny_bad_rename" is created.
76780+
76781+config GRKERNSEC_CHROOT_CAPS
76782+ bool "Capability restrictions"
76783+ default y if GRKERNSEC_CONFIG_AUTO
76784+ depends on GRKERNSEC_CHROOT
76785+ help
76786+ If you say Y here, the capabilities on all processes within a
76787+ chroot jail will be lowered to stop module insertion, raw i/o,
76788+ system and net admin tasks, rebooting the system, modifying immutable
76789+ files, modifying IPC owned by another, and changing the system time.
76790+ This is left an option because it can break some apps. Disable this
76791+ if your chrooted apps are having problems performing those kinds of
76792+ tasks. If the sysctl option is enabled, a sysctl option with
76793+ name "chroot_caps" is created.
76794+
76795+config GRKERNSEC_CHROOT_INITRD
76796+ bool "Exempt initrd tasks from restrictions"
76797+ default y if GRKERNSEC_CONFIG_AUTO
76798+ depends on GRKERNSEC_CHROOT && BLK_DEV_INITRD
76799+ help
76800+ If you say Y here, tasks started prior to init will be exempted from
76801+ grsecurity's chroot restrictions. This option is mainly meant to
76802+ resolve Plymouth's performing privileged operations unnecessarily
76803+ in a chroot.
76804+
76805+endmenu
76806+menu "Kernel Auditing"
76807+depends on GRKERNSEC
76808+
76809+config GRKERNSEC_AUDIT_GROUP
76810+ bool "Single group for auditing"
76811+ help
76812+ If you say Y here, the exec and chdir logging features will only operate
76813+ on a group you specify. This option is recommended if you only want to
76814+ watch certain users instead of having a large amount of logs from the
76815+ entire system. If the sysctl option is enabled, a sysctl option with
76816+ name "audit_group" is created.
76817+
76818+config GRKERNSEC_AUDIT_GID
76819+ int "GID for auditing"
76820+ depends on GRKERNSEC_AUDIT_GROUP
76821+ default 1007
76822+
76823+config GRKERNSEC_EXECLOG
76824+ bool "Exec logging"
76825+ help
76826+ If you say Y here, all execve() calls will be logged (since the
76827+ other exec*() calls are frontends to execve(), all execution
76828+ will be logged). Useful for shell-servers that like to keep track
76829+ of their users. If the sysctl option is enabled, a sysctl option with
76830+ name "exec_logging" is created.
76831+ WARNING: This option when enabled will produce a LOT of logs, especially
76832+ on an active system.
76833+
76834+config GRKERNSEC_RESLOG
76835+ bool "Resource logging"
76836+ default y if GRKERNSEC_CONFIG_AUTO
76837+ help
76838+ If you say Y here, all attempts to overstep resource limits will
76839+ be logged with the resource name, the requested size, and the current
76840+ limit. It is highly recommended that you say Y here. If the sysctl
76841+ option is enabled, a sysctl option with name "resource_logging" is
76842+ created. If the RBAC system is enabled, the sysctl value is ignored.
76843+
76844+config GRKERNSEC_CHROOT_EXECLOG
76845+ bool "Log execs within chroot"
76846+ help
76847+ If you say Y here, all executions inside a chroot jail will be logged
76848+ to syslog. This can cause a large amount of logs if certain
76849+ applications (eg. djb's daemontools) are installed on the system, and
76850+ is therefore left as an option. If the sysctl option is enabled, a
76851+ sysctl option with name "chroot_execlog" is created.
76852+
76853+config GRKERNSEC_AUDIT_PTRACE
76854+ bool "Ptrace logging"
76855+ help
76856+ If you say Y here, all attempts to attach to a process via ptrace
76857+ will be logged. If the sysctl option is enabled, a sysctl option
76858+ with name "audit_ptrace" is created.
76859+
76860+config GRKERNSEC_AUDIT_CHDIR
76861+ bool "Chdir logging"
76862+ help
76863+ If you say Y here, all chdir() calls will be logged. If the sysctl
76864+ option is enabled, a sysctl option with name "audit_chdir" is created.
76865+
76866+config GRKERNSEC_AUDIT_MOUNT
76867+ bool "(Un)Mount logging"
76868+ help
76869+ If you say Y here, all mounts and unmounts will be logged. If the
76870+ sysctl option is enabled, a sysctl option with name "audit_mount" is
76871+ created.
76872+
76873+config GRKERNSEC_SIGNAL
76874+ bool "Signal logging"
76875+ default y if GRKERNSEC_CONFIG_AUTO
76876+ help
76877+ If you say Y here, certain important signals will be logged, such as
76878+ SIGSEGV, which will as a result inform you of when an error in a program
76879+ occurred, which in some cases could mean a possible exploit attempt.
76880+ If the sysctl option is enabled, a sysctl option with name
76881+ "signal_logging" is created.
76882+
76883+config GRKERNSEC_FORKFAIL
76884+ bool "Fork failure logging"
76885+ help
76886+ If you say Y here, all failed fork() attempts will be logged.
76887+ This could suggest a fork bomb, or someone attempting to overstep
76888+ their process limit. If the sysctl option is enabled, a sysctl option
76889+ with name "forkfail_logging" is created.
76890+
76891+config GRKERNSEC_TIME
76892+ bool "Time change logging"
76893+ default y if GRKERNSEC_CONFIG_AUTO
76894+ help
76895+ If you say Y here, any changes of the system clock will be logged.
76896+ If the sysctl option is enabled, a sysctl option with name
76897+ "timechange_logging" is created.
76898+
76899+config GRKERNSEC_PROC_IPADDR
76900+ bool "/proc/<pid>/ipaddr support"
76901+ default y if GRKERNSEC_CONFIG_AUTO
76902+ help
76903+ If you say Y here, a new entry will be added to each /proc/<pid>
76904+ directory that contains the IP address of the person using the task.
76905+ The IP is carried across local TCP and AF_UNIX stream sockets.
76906+ This information can be useful for IDS/IPSes to perform remote response
76907+ to a local attack. The entry is readable by only the owner of the
76908+ process (and root if he has CAP_DAC_OVERRIDE, which can be removed via
76909+ the RBAC system), and thus does not create privacy concerns.
76910+
76911+config GRKERNSEC_RWXMAP_LOG
76912+ bool 'Denied RWX mmap/mprotect logging'
76913+ default y if GRKERNSEC_CONFIG_AUTO
76914+ depends on PAX_MPROTECT && !PAX_EMUPLT && !PAX_EMUSIGRT
76915+ help
76916+ If you say Y here, calls to mmap() and mprotect() with explicit
76917+ usage of PROT_WRITE and PROT_EXEC together will be logged when
76918+ denied by the PAX_MPROTECT feature. This feature will also
76919+ log other problematic scenarios that can occur when PAX_MPROTECT
76920+ is enabled on a binary, like textrels and PT_GNU_STACK. If the
76921+ sysctl option is enabled, a sysctl option with name "rwxmap_logging"
76922+ is created.
76923+
76924+endmenu
76925+
76926+menu "Executable Protections"
76927+depends on GRKERNSEC
76928+
76929+config GRKERNSEC_DMESG
76930+ bool "Dmesg(8) restriction"
76931+ default y if GRKERNSEC_CONFIG_AUTO
76932+ help
76933+ If you say Y here, non-root users will not be able to use dmesg(8)
76934+ to view the contents of the kernel's circular log buffer.
76935+ The kernel's log buffer often contains kernel addresses and other
76936+ identifying information useful to an attacker in fingerprinting a
76937+ system for a targeted exploit.
76938+ If the sysctl option is enabled, a sysctl option with name "dmesg" is
76939+ created.
76940+
76941+config GRKERNSEC_HARDEN_PTRACE
76942+ bool "Deter ptrace-based process snooping"
76943+ default y if GRKERNSEC_CONFIG_AUTO
76944+ help
76945+ If you say Y here, TTY sniffers and other malicious monitoring
76946+ programs implemented through ptrace will be defeated. If you
76947+ have been using the RBAC system, this option has already been
76948+ enabled for several years for all users, with the ability to make
76949+ fine-grained exceptions.
76950+
76951+ This option only affects the ability of non-root users to ptrace
76952+ processes that are not a descendant of the ptracing process.
76953+ This means that strace ./binary and gdb ./binary will still work,
76954+ but attaching to arbitrary processes will not. If the sysctl
76955+ option is enabled, a sysctl option with name "harden_ptrace" is
76956+ created.
76957+
76958+config GRKERNSEC_PTRACE_READEXEC
76959+ bool "Require read access to ptrace sensitive binaries"
76960+ default y if GRKERNSEC_CONFIG_AUTO
76961+ help
76962+ If you say Y here, unprivileged users will not be able to ptrace unreadable
76963+ binaries. This option is useful in environments that
76964+ remove the read bits (e.g. file mode 4711) from suid binaries to
76965+ prevent infoleaking of their contents. This option adds
76966+ consistency to the use of that file mode, as the binary could normally
76967+ be read out when run without privileges while ptracing.
76968+
76969+ If the sysctl option is enabled, a sysctl option with name "ptrace_readexec"
76970+ is created.
76971+
76972+config GRKERNSEC_SETXID
76973+ bool "Enforce consistent multithreaded privileges"
76974+ default y if GRKERNSEC_CONFIG_AUTO
76975+ depends on (X86 || SPARC64 || PPC || ARM || MIPS)
76976+ help
76977+ If you say Y here, a change from a root uid to a non-root uid
76978+ in a multithreaded application will cause the resulting uids,
76979+ gids, supplementary groups, and capabilities in that thread
76980+ to be propagated to the other threads of the process. In most
76981+ cases this is unnecessary, as glibc will emulate this behavior
76982+ on behalf of the application. Other libcs do not act in the
76983+ same way, allowing the other threads of the process to continue
76984+ running with root privileges. If the sysctl option is enabled,
76985+ a sysctl option with name "consistent_setxid" is created.
76986+
76987+config GRKERNSEC_HARDEN_IPC
76988+ bool "Disallow access to overly-permissive IPC objects"
76989+ default y if GRKERNSEC_CONFIG_AUTO
76990+ depends on SYSVIPC
76991+ help
76992+ If you say Y here, access to overly-permissive IPC objects (shared
76993+ memory, message queues, and semaphores) will be denied for processes
76994+ given the following criteria beyond normal permission checks:
76995+ 1) If the IPC object is world-accessible and the euid doesn't match
76996+ that of the creator or current uid for the IPC object
76997+ 2) If the IPC object is group-accessible and the egid doesn't
76998+ match that of the creator or current gid for the IPC object
76999+ It's a common error to grant too much permission to these objects,
77000+ with impact ranging from denial of service and information leaking to
77001+ privilege escalation. This feature was developed in response to
77002+ research by Tim Brown:
77003+ http://labs.portcullis.co.uk/whitepapers/memory-squatting-attacks-on-system-v-shared-memory/
77004+ who found hundreds of such insecure usages. Processes with
77005+ CAP_IPC_OWNER are still permitted to access these IPC objects.
77006+ If the sysctl option is enabled, a sysctl option with name
77007+ "harden_ipc" is created.
77008+
77009+config GRKERNSEC_TPE
77010+ bool "Trusted Path Execution (TPE)"
77011+ default y if GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_SERVER
77012+ help
77013+ If you say Y here, you will be able to choose a gid to add to the
77014+ supplementary groups of users you want to mark as "untrusted."
77015+ These users will not be able to execute any files that are not in
77016+ root-owned directories writable only by root. If the sysctl option
77017+ is enabled, a sysctl option with name "tpe" is created.
77018+
77019+config GRKERNSEC_TPE_ALL
77020+ bool "Partially restrict all non-root users"
77021+ depends on GRKERNSEC_TPE
77022+ help
77023+ If you say Y here, all non-root users will be covered under
77024+ a weaker TPE restriction. This is separate from, and in addition to,
77025+ the main TPE options that you have selected elsewhere. Thus, if a
77026+ "trusted" GID is chosen, this restriction applies to even that GID.
77027+ Under this restriction, all non-root users will only be allowed to
77028+ execute files in directories they own that are not group or
77029+ world-writable, or in directories owned by root and writable only by
77030+ root. If the sysctl option is enabled, a sysctl option with name
77031+ "tpe_restrict_all" is created.
77032+
77033+config GRKERNSEC_TPE_INVERT
77034+ bool "Invert GID option"
77035+ depends on GRKERNSEC_TPE
77036+ help
77037+ If you say Y here, the group you specify in the TPE configuration will
77038+ decide what group TPE restrictions will be *disabled* for. This
77039+ option is useful if you want TPE restrictions to be applied to most
77040+ users on the system. If the sysctl option is enabled, a sysctl option
77041+ with name "tpe_invert" is created. Unlike other sysctl options, this
77042+ entry will default to on for backward-compatibility.
77043+
77044+config GRKERNSEC_TPE_GID
77045+ int
77046+ default GRKERNSEC_TPE_UNTRUSTED_GID if (GRKERNSEC_TPE && !GRKERNSEC_TPE_INVERT)
77047+ default GRKERNSEC_TPE_TRUSTED_GID if (GRKERNSEC_TPE && GRKERNSEC_TPE_INVERT)
77048+
77049+config GRKERNSEC_TPE_UNTRUSTED_GID
77050+ int "GID for TPE-untrusted users"
77051+ depends on GRKERNSEC_TPE && !GRKERNSEC_TPE_INVERT
77052+ default 1005
77053+ help
77054+ Setting this GID determines what group TPE restrictions will be
77055+ *enabled* for. If the sysctl option is enabled, a sysctl option
77056+ with name "tpe_gid" is created.
77057+
77058+config GRKERNSEC_TPE_TRUSTED_GID
77059+ int "GID for TPE-trusted users"
77060+ depends on GRKERNSEC_TPE && GRKERNSEC_TPE_INVERT
77061+ default 1005
77062+ help
77063+ Setting this GID determines what group TPE restrictions will be
77064+ *disabled* for. If the sysctl option is enabled, a sysctl option
77065+ with name "tpe_gid" is created.
77066+
77067+endmenu
77068+menu "Network Protections"
77069+depends on GRKERNSEC
77070+
77071+config GRKERNSEC_BLACKHOLE
77072+ bool "TCP/UDP blackhole and LAST_ACK DoS prevention"
77073+ default y if GRKERNSEC_CONFIG_AUTO
77074+ depends on NET
77075+ help
77076+ If you say Y here, neither TCP resets nor ICMP
77077+ destination-unreachable packets will be sent in response to packets
77078+ sent to ports for which no associated listening process exists.
77079+ It will also prevent the sending of ICMP protocol unreachable packets
77080+ in response to packets with unknown protocols.
77081+ This feature supports both IPV4 and IPV6 and exempts the
77082+ loopback interface from blackholing. Enabling this feature
77083+ makes a host more resilient to DoS attacks and reduces network
77084+ visibility against scanners.
77085+
77086+ The blackhole feature as-implemented is equivalent to the FreeBSD
77087+ blackhole feature, as it prevents RST responses to all packets, not
77088+ just SYNs. Under most application behavior this causes no
77089+ problems, but applications (like haproxy) may not close certain
77090+ connections in a way that cleanly terminates them on the remote
77091+ end, leaving the remote host in LAST_ACK state. Because of this
77092+ side-effect and to prevent intentional LAST_ACK DoSes, this
77093+ feature also adds automatic mitigation against such attacks.
77094+ The mitigation drastically reduces the amount of time a socket
77095+ can spend in LAST_ACK state. If you're using haproxy and not
77096+ all servers it connects to have this option enabled, consider
77097+ disabling this feature on the haproxy host.
77098+
77099+ If the sysctl option is enabled, two sysctl options with names
77100+ "ip_blackhole" and "lastack_retries" will be created.
77101+ While "ip_blackhole" takes the standard zero/non-zero on/off
77102+ toggle, "lastack_retries" uses the same kinds of values as
77103+ "tcp_retries1" and "tcp_retries2". The default value of 4
77104+ prevents a socket from lasting more than 45 seconds in LAST_ACK
77105+ state.
77106+
77107+config GRKERNSEC_NO_SIMULT_CONNECT
77108+ bool "Disable TCP Simultaneous Connect"
77109+ default y if GRKERNSEC_CONFIG_AUTO
77110+ depends on NET
77111+ help
77112+ If you say Y here, a feature by Willy Tarreau will be enabled that
77113+ removes a weakness in Linux's strict implementation of TCP that
77114+ allows two clients to connect to each other without either entering
77115+ a listening state. The weakness allows an attacker to easily prevent
77116+ a client from connecting to a known server provided the source port
77117+ for the connection is guessed correctly.
77118+
77119+ As the weakness could be used to prevent an antivirus or IPS from
77120+ fetching updates, or prevent an SSL gateway from fetching a CRL,
77121+ it should be eliminated by enabling this option. Though Linux is
77122+ one of few operating systems supporting simultaneous connect, it
77123+ has no legitimate use in practice and is rarely supported by firewalls.
77124+
77125+config GRKERNSEC_SOCKET
77126+ bool "Socket restrictions"
77127+ depends on NET
77128+ help
77129+ If you say Y here, you will be able to choose from several options.
77130+ If you assign a GID on your system and add it to the supplementary
77131+ groups of users you want to restrict socket access to, this patch
77132+ will perform up to three things, based on the option(s) you choose.
77133+
77134+config GRKERNSEC_SOCKET_ALL
77135+ bool "Deny any sockets to group"
77136+ depends on GRKERNSEC_SOCKET
77137+ help
77138+ If you say Y here, you will be able to choose a GID whose users will
77139+ be unable to connect to other hosts from your machine or run server
77140+ applications from your machine. If the sysctl option is enabled, a
77141+ sysctl option with name "socket_all" is created.
77142+
77143+config GRKERNSEC_SOCKET_ALL_GID
77144+ int "GID to deny all sockets for"
77145+ depends on GRKERNSEC_SOCKET_ALL
77146+ default 1004
77147+ help
77148+ Here you can choose the GID to disable socket access for. Remember to
77149+ add the users you want socket access disabled for to the GID
77150+ specified here. If the sysctl option is enabled, a sysctl option
77151+ with name "socket_all_gid" is created.
77152+
77153+config GRKERNSEC_SOCKET_CLIENT
77154+ bool "Deny client sockets to group"
77155+ depends on GRKERNSEC_SOCKET
77156+ help
77157+ If you say Y here, you will be able to choose a GID whose users will
77158+ be unable to connect to other hosts from your machine, but will be
77159+ able to run servers. If this option is enabled, all users in the group
77160+ you specify will have to use passive mode when initiating ftp transfers
77161+ from the shell on your machine. If the sysctl option is enabled, a
77162+ sysctl option with name "socket_client" is created.
77163+
77164+config GRKERNSEC_SOCKET_CLIENT_GID
77165+ int "GID to deny client sockets for"
77166+ depends on GRKERNSEC_SOCKET_CLIENT
77167+ default 1003
77168+ help
77169+ Here you can choose the GID to disable client socket access for.
77170+ Remember to add the users you want client socket access disabled for to
77171+ the GID specified here. If the sysctl option is enabled, a sysctl
77172+ option with name "socket_client_gid" is created.
77173+
77174+config GRKERNSEC_SOCKET_SERVER
77175+ bool "Deny server sockets to group"
77176+ depends on GRKERNSEC_SOCKET
77177+ help
77178+ If you say Y here, you will be able to choose a GID whose users will
77179+ be unable to run server applications from your machine. If the sysctl
77180+ option is enabled, a sysctl option with name "socket_server" is created.
77181+
77182+config GRKERNSEC_SOCKET_SERVER_GID
77183+ int "GID to deny server sockets for"
77184+ depends on GRKERNSEC_SOCKET_SERVER
77185+ default 1002
77186+ help
77187+ Here you can choose the GID to disable server socket access for.
77188+ Remember to add the users you want server socket access disabled for to
77189+ the GID specified here. If the sysctl option is enabled, a sysctl
77190+ option with name "socket_server_gid" is created.
77191+
77192+endmenu
77193+
77194+menu "Physical Protections"
77195+depends on GRKERNSEC
77196+
77197+config GRKERNSEC_DENYUSB
77198+ bool "Deny new USB connections after toggle"
77199+ default y if GRKERNSEC_CONFIG_AUTO
77200+ depends on SYSCTL && USB_SUPPORT
77201+ help
77202+ If you say Y here, a new sysctl option with name "deny_new_usb"
77203+ will be created. Setting its value to 1 will prevent any new
77204+ USB devices from being recognized by the OS. Any attempted USB
77205+ device insertion will be logged. This option is intended to be
77206+ used against custom USB devices designed to exploit vulnerabilities
77207+ in various USB device drivers.
77208+
77209+ For greatest effectiveness, this sysctl should be set after any
77210+ relevant init scripts. This option is safe to enable in distros
77211+ as each user can choose whether or not to toggle the sysctl.
77212+
77213+config GRKERNSEC_DENYUSB_FORCE
77214+ bool "Reject all USB devices not connected at boot"
77215+ select USB
77216+ depends on GRKERNSEC_DENYUSB
77217+ help
77218+ If you say Y here, a variant of GRKERNSEC_DENYUSB will be enabled
77219+ that doesn't involve a sysctl entry. This option should only be
77220+ enabled if you're sure you want to deny all new USB connections
77221+ at runtime and don't want to modify init scripts. This should not
77222+ be enabled by distros. It forces the core USB code to be built
77223+ into the kernel image so that all devices connected at boot time
77224+ can be recognized and new USB device connections can be prevented
77225+ prior to init running.
77226+
77227+endmenu
77228+
77229+menu "Sysctl Support"
77230+depends on GRKERNSEC && SYSCTL
77231+
77232+config GRKERNSEC_SYSCTL
77233+ bool "Sysctl support"
77234+ default y if GRKERNSEC_CONFIG_AUTO
77235+ help
77236+ If you say Y here, you will be able to change the options that
77237+ grsecurity runs with at bootup, without having to recompile your
77238+ kernel. You can echo values to files in /proc/sys/kernel/grsecurity
77239+ to enable (1) or disable (0) various features. All the sysctl entries
77240+ are mutable until the "grsec_lock" entry is set to a non-zero value.
77241+ All features enabled in the kernel configuration are disabled at boot
77242+ if you do not say Y to the "Turn on features by default" option.
77243+ All options should be set at startup, and the grsec_lock entry should
77244+ be set to a non-zero value after all the options are set.
77245+ *THIS IS EXTREMELY IMPORTANT*
77246+
77247+config GRKERNSEC_SYSCTL_DISTRO
77248+ bool "Extra sysctl support for distro makers (READ HELP)"
77249+ depends on GRKERNSEC_SYSCTL && GRKERNSEC_IO
77250+ help
77251+ If you say Y here, additional sysctl options will be created
77252+ for features that affect processes running as root. Therefore,
77253+ it is critical when using this option that the grsec_lock entry be
77254+ enabled after boot. Only distros with prebuilt kernel packages
77255+ with this option enabled that can ensure grsec_lock is enabled
77256+ after boot should use this option.
77257+ *Failure to set grsec_lock after boot makes all grsec features
77258+ this option covers useless*
77259+
77260+ Currently this option creates the following sysctl entries:
77261+ "Disable Privileged I/O": "disable_priv_io"
77262+
77263+config GRKERNSEC_SYSCTL_ON
77264+ bool "Turn on features by default"
77265+ default y if GRKERNSEC_CONFIG_AUTO
77266+ depends on GRKERNSEC_SYSCTL
77267+ help
77268+ If you say Y here, instead of having all features enabled in the
77269+ kernel configuration disabled at boot time, the features will be
77270+ enabled at boot time. It is recommended you say Y here unless
77271+ there is some reason you would want all sysctl-tunable features to
77272+ be disabled by default. As mentioned elsewhere, it is important
77273+ to enable the grsec_lock entry once you have finished modifying
77274+ the sysctl entries.
77275+
77276+endmenu
77277+menu "Logging Options"
77278+depends on GRKERNSEC
77279+
77280+config GRKERNSEC_FLOODTIME
77281+ int "Seconds in between log messages (minimum)"
77282+ default 10
77283+ help
77284+ This option allows you to enforce the number of seconds between
77285+ grsecurity log messages. The default should be suitable for most
77286+ people, however, if you choose to change it, choose a value small enough
77287+ to allow informative logs to be produced, but large enough to
77288+ prevent flooding.
77289+
77290+ Setting both this value and GRKERNSEC_FLOODBURST to 0 will disable
77291+ any rate limiting on grsecurity log messages.
77292+
77293+config GRKERNSEC_FLOODBURST
77294+ int "Number of messages in a burst (maximum)"
77295+ default 6
77296+ help
77297+ This option allows you to choose the maximum number of messages allowed
77298+ within the flood time interval you chose in a separate option. The
77299+ default should be suitable for most people, however if you find that
77300+ many of your logs are being interpreted as flooding, you may want to
77301+ raise this value.
77302+
77303+ Setting both this value and GRKERNSEC_FLOODTIME to 0 will disable
77304+ any rate limiting on grsecurity log messages.
77305+
77306+endmenu
77307diff --git a/grsecurity/Makefile b/grsecurity/Makefile
77308new file mode 100644
77309index 0000000..30ababb
77310--- /dev/null
77311+++ b/grsecurity/Makefile
77312@@ -0,0 +1,54 @@
77313+# grsecurity – access control and security hardening for Linux
77314+# All code in this directory and various hooks located throughout the Linux kernel are
77315+# Copyright (C) 2001-2014 Bradley Spengler, Open Source Security, Inc.
77316+# http://www.grsecurity.net spender@grsecurity.net
77317+#
77318+# This program is free software; you can redistribute it and/or
77319+# modify it under the terms of the GNU General Public License version 2
77320+# as published by the Free Software Foundation.
77321+#
77322+# This program is distributed in the hope that it will be useful,
77323+# but WITHOUT ANY WARRANTY; without even the implied warranty of
77324+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
77325+# GNU General Public License for more details.
77326+#
77327+# You should have received a copy of the GNU General Public License
77328+# along with this program; if not, write to the Free Software
77329+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
77330+
77331+KBUILD_CFLAGS += -Werror
77332+
77333+obj-y = grsec_chdir.o grsec_chroot.o grsec_exec.o grsec_fifo.o grsec_fork.o \
77334+ grsec_mount.o grsec_sig.o grsec_sysctl.o \
77335+ grsec_time.o grsec_tpe.o grsec_link.o grsec_pax.o grsec_ptrace.o \
77336+ grsec_usb.o grsec_ipc.o grsec_proc.o
77337+
77338+obj-$(CONFIG_GRKERNSEC) += grsec_init.o grsum.o gracl.o gracl_segv.o \
77339+ gracl_cap.o gracl_alloc.o gracl_shm.o grsec_mem.o gracl_fs.o \
77340+ gracl_learn.o grsec_log.o gracl_policy.o
77341+ifdef CONFIG_COMPAT
77342+obj-$(CONFIG_GRKERNSEC) += gracl_compat.o
77343+endif
77344+
77345+obj-$(CONFIG_GRKERNSEC_RESLOG) += gracl_res.o
77346+
77347+ifdef CONFIG_NET
77348+obj-y += grsec_sock.o
77349+obj-$(CONFIG_GRKERNSEC) += gracl_ip.o
77350+endif
77351+
77352+ifndef CONFIG_GRKERNSEC
77353+obj-y += grsec_disabled.o
77354+endif
77355+
77356+ifdef CONFIG_GRKERNSEC_HIDESYM
77357+extra-y := grsec_hidesym.o
77358+$(obj)/grsec_hidesym.o:
77359+ @-chmod -f 500 /boot
77360+ @-chmod -f 500 /lib/modules
77361+ @-chmod -f 500 /lib64/modules
77362+ @-chmod -f 500 /lib32/modules
77363+ @-chmod -f 700 .
77364+ @-chmod -f 700 $(objtree)
77365+ @echo ' grsec: protected kernel image paths'
77366+endif
77367diff --git a/grsecurity/gracl.c b/grsecurity/gracl.c
77368new file mode 100644
77369index 0000000..811af1f
77370--- /dev/null
77371+++ b/grsecurity/gracl.c
77372@@ -0,0 +1,2749 @@
77373+#include <linux/kernel.h>
77374+#include <linux/module.h>
77375+#include <linux/sched.h>
77376+#include <linux/mm.h>
77377+#include <linux/file.h>
77378+#include <linux/fs.h>
77379+#include <linux/namei.h>
77380+#include <linux/mount.h>
77381+#include <linux/tty.h>
77382+#include <linux/proc_fs.h>
77383+#include <linux/lglock.h>
77384+#include <linux/slab.h>
77385+#include <linux/vmalloc.h>
77386+#include <linux/types.h>
77387+#include <linux/sysctl.h>
77388+#include <linux/netdevice.h>
77389+#include <linux/ptrace.h>
77390+#include <linux/gracl.h>
77391+#include <linux/gralloc.h>
77392+#include <linux/security.h>
77393+#include <linux/grinternal.h>
77394+#include <linux/pid_namespace.h>
77395+#include <linux/stop_machine.h>
77396+#include <linux/fdtable.h>
77397+#include <linux/percpu.h>
77398+#include <linux/lglock.h>
77399+#include <linux/hugetlb.h>
77400+#include <linux/posix-timers.h>
77401+#include <linux/prefetch.h>
77402+#if defined(CONFIG_BTRFS_FS) || defined(CONFIG_BTRFS_FS_MODULE)
77403+#include <linux/magic.h>
77404+#include <linux/pagemap.h>
77405+#include "../fs/btrfs/async-thread.h"
77406+#include "../fs/btrfs/ctree.h"
77407+#include "../fs/btrfs/btrfs_inode.h"
77408+#endif
77409+#include "../fs/mount.h"
77410+
77411+#include <asm/uaccess.h>
77412+#include <asm/errno.h>
77413+#include <asm/mman.h>
77414+
77415+#define FOR_EACH_ROLE_START(role) \
77416+ role = running_polstate.role_list; \
77417+ while (role) {
77418+
77419+#define FOR_EACH_ROLE_END(role) \
77420+ role = role->prev; \
77421+ }
77422+
77423+extern struct path gr_real_root;
77424+
77425+static struct gr_policy_state running_polstate;
77426+struct gr_policy_state *polstate = &running_polstate;
77427+extern struct gr_alloc_state *current_alloc_state;
77428+
77429+extern char *gr_shared_page[4];
77430+DEFINE_RWLOCK(gr_inode_lock);
77431+
77432+static unsigned int gr_status __read_only = GR_STATUS_INIT;
77433+
77434+#ifdef CONFIG_NET
77435+extern struct vfsmount *sock_mnt;
77436+#endif
77437+
77438+extern struct vfsmount *pipe_mnt;
77439+extern struct vfsmount *shm_mnt;
77440+
77441+#ifdef CONFIG_HUGETLBFS
77442+extern struct vfsmount *hugetlbfs_vfsmount[HUGE_MAX_HSTATE];
77443+#endif
77444+
77445+extern u16 acl_sp_role_value;
77446+extern struct acl_object_label *fakefs_obj_rw;
77447+extern struct acl_object_label *fakefs_obj_rwx;
77448+
77449+int gr_acl_is_enabled(void)
77450+{
77451+ return (gr_status & GR_READY);
77452+}
77453+
77454+void gr_enable_rbac_system(void)
77455+{
77456+ pax_open_kernel();
77457+ gr_status |= GR_READY;
77458+ pax_close_kernel();
77459+}
77460+
77461+int gr_rbac_disable(void *unused)
77462+{
77463+ pax_open_kernel();
77464+ gr_status &= ~GR_READY;
77465+ pax_close_kernel();
77466+
77467+ return 0;
77468+}
77469+
77470+static inline dev_t __get_dev(const struct dentry *dentry)
77471+{
77472+#if defined(CONFIG_BTRFS_FS) || defined(CONFIG_BTRFS_FS_MODULE)
77473+ if (dentry->d_sb->s_magic == BTRFS_SUPER_MAGIC)
77474+ return BTRFS_I(dentry->d_inode)->root->anon_dev;
77475+ else
77476+#endif
77477+ return dentry->d_sb->s_dev;
77478+}
77479+
77480+static inline u64 __get_ino(const struct dentry *dentry)
77481+{
77482+#if defined(CONFIG_BTRFS_FS) || defined(CONFIG_BTRFS_FS_MODULE)
77483+ if (dentry->d_sb->s_magic == BTRFS_SUPER_MAGIC)
77484+ return btrfs_ino(dentry->d_inode);
77485+ else
77486+#endif
77487+ return dentry->d_inode->i_ino;
77488+}
77489+
77490+dev_t gr_get_dev_from_dentry(struct dentry *dentry)
77491+{
77492+ return __get_dev(dentry);
77493+}
77494+
77495+u64 gr_get_ino_from_dentry(struct dentry *dentry)
77496+{
77497+ return __get_ino(dentry);
77498+}
77499+
77500+static char gr_task_roletype_to_char(struct task_struct *task)
77501+{
77502+ switch (task->role->roletype &
77503+ (GR_ROLE_DEFAULT | GR_ROLE_USER | GR_ROLE_GROUP |
77504+ GR_ROLE_SPECIAL)) {
77505+ case GR_ROLE_DEFAULT:
77506+ return 'D';
77507+ case GR_ROLE_USER:
77508+ return 'U';
77509+ case GR_ROLE_GROUP:
77510+ return 'G';
77511+ case GR_ROLE_SPECIAL:
77512+ return 'S';
77513+ }
77514+
77515+ return 'X';
77516+}
77517+
77518+char gr_roletype_to_char(void)
77519+{
77520+ return gr_task_roletype_to_char(current);
77521+}
77522+
77523+int
77524+gr_acl_tpe_check(void)
77525+{
77526+ if (unlikely(!(gr_status & GR_READY)))
77527+ return 0;
77528+ if (current->role->roletype & GR_ROLE_TPE)
77529+ return 1;
77530+ else
77531+ return 0;
77532+}
77533+
77534+int
77535+gr_handle_rawio(const struct inode *inode)
77536+{
77537+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
77538+ if (inode && (S_ISBLK(inode->i_mode) || (S_ISCHR(inode->i_mode) && imajor(inode) == RAW_MAJOR)) &&
77539+ grsec_enable_chroot_caps && proc_is_chrooted(current) &&
77540+ !capable(CAP_SYS_RAWIO))
77541+ return 1;
77542+#endif
77543+ return 0;
77544+}
77545+
77546+int
77547+gr_streq(const char *a, const char *b, const unsigned int lena, const unsigned int lenb)
77548+{
77549+ if (likely(lena != lenb))
77550+ return 0;
77551+
77552+ return !memcmp(a, b, lena);
77553+}
77554+
77555+static int prepend(char **buffer, int *buflen, const char *str, int namelen)
77556+{
77557+ *buflen -= namelen;
77558+ if (*buflen < 0)
77559+ return -ENAMETOOLONG;
77560+ *buffer -= namelen;
77561+ memcpy(*buffer, str, namelen);
77562+ return 0;
77563+}
77564+
77565+static int prepend_name(char **buffer, int *buflen, struct qstr *name)
77566+{
77567+ return prepend(buffer, buflen, name->name, name->len);
77568+}
77569+
77570+static int prepend_path(const struct path *path, struct path *root,
77571+ char **buffer, int *buflen)
77572+{
77573+ struct dentry *dentry = path->dentry;
77574+ struct vfsmount *vfsmnt = path->mnt;
77575+ struct mount *mnt = real_mount(vfsmnt);
77576+ bool slash = false;
77577+ int error = 0;
77578+
77579+ while (dentry != root->dentry || vfsmnt != root->mnt) {
77580+ struct dentry * parent;
77581+
77582+ if (dentry == vfsmnt->mnt_root || IS_ROOT(dentry)) {
77583+ /* Global root? */
77584+ if (!mnt_has_parent(mnt)) {
77585+ goto out;
77586+ }
77587+ dentry = mnt->mnt_mountpoint;
77588+ mnt = mnt->mnt_parent;
77589+ vfsmnt = &mnt->mnt;
77590+ continue;
77591+ }
77592+ parent = dentry->d_parent;
77593+ prefetch(parent);
77594+ spin_lock(&dentry->d_lock);
77595+ error = prepend_name(buffer, buflen, &dentry->d_name);
77596+ spin_unlock(&dentry->d_lock);
77597+ if (!error)
77598+ error = prepend(buffer, buflen, "/", 1);
77599+ if (error)
77600+ break;
77601+
77602+ slash = true;
77603+ dentry = parent;
77604+ }
77605+
77606+out:
77607+ if (!error && !slash)
77608+ error = prepend(buffer, buflen, "/", 1);
77609+
77610+ return error;
77611+}
77612+
77613+/* this must be called with mount_lock and rename_lock held */
77614+
77615+static char *__our_d_path(const struct path *path, struct path *root,
77616+ char *buf, int buflen)
77617+{
77618+ char *res = buf + buflen;
77619+ int error;
77620+
77621+ prepend(&res, &buflen, "\0", 1);
77622+ error = prepend_path(path, root, &res, &buflen);
77623+ if (error)
77624+ return ERR_PTR(error);
77625+
77626+ return res;
77627+}
77628+
77629+static char *
77630+gen_full_path(struct path *path, struct path *root, char *buf, int buflen)
77631+{
77632+ char *retval;
77633+
77634+ retval = __our_d_path(path, root, buf, buflen);
77635+ if (unlikely(IS_ERR(retval)))
77636+ retval = strcpy(buf, "<path too long>");
77637+ else if (unlikely(retval[1] == '/' && retval[2] == '\0'))
77638+ retval[1] = '\0';
77639+
77640+ return retval;
77641+}
77642+
77643+static char *
77644+__d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
77645+ char *buf, int buflen)
77646+{
77647+ struct path path;
77648+ char *res;
77649+
77650+ path.dentry = (struct dentry *)dentry;
77651+ path.mnt = (struct vfsmount *)vfsmnt;
77652+
77653+ /* we can use gr_real_root.dentry, gr_real_root.mnt, because this is only called
77654+ by the RBAC system */
77655+ res = gen_full_path(&path, &gr_real_root, buf, buflen);
77656+
77657+ return res;
77658+}
77659+
77660+static char *
77661+d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
77662+ char *buf, int buflen)
77663+{
77664+ char *res;
77665+ struct path path;
77666+ struct path root;
77667+ struct task_struct *reaper = init_pid_ns.child_reaper;
77668+
77669+ path.dentry = (struct dentry *)dentry;
77670+ path.mnt = (struct vfsmount *)vfsmnt;
77671+
77672+ /* we can't use gr_real_root.dentry, gr_real_root.mnt, because they belong only to the RBAC system */
77673+ get_fs_root(reaper->fs, &root);
77674+
77675+ read_seqlock_excl(&mount_lock);
77676+ write_seqlock(&rename_lock);
77677+ res = gen_full_path(&path, &root, buf, buflen);
77678+ write_sequnlock(&rename_lock);
77679+ read_sequnlock_excl(&mount_lock);
77680+
77681+ path_put(&root);
77682+ return res;
77683+}
77684+
77685+char *
77686+gr_to_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt)
77687+{
77688+ char *ret;
77689+ read_seqlock_excl(&mount_lock);
77690+ write_seqlock(&rename_lock);
77691+ ret = __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
77692+ PAGE_SIZE);
77693+ write_sequnlock(&rename_lock);
77694+ read_sequnlock_excl(&mount_lock);
77695+ return ret;
77696+}
77697+
77698+static char *
77699+gr_to_proc_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt)
77700+{
77701+ char *ret;
77702+ char *buf;
77703+ int buflen;
77704+
77705+ read_seqlock_excl(&mount_lock);
77706+ write_seqlock(&rename_lock);
77707+ buf = per_cpu_ptr(gr_shared_page[0], smp_processor_id());
77708+ ret = __d_real_path(dentry, mnt, buf, PAGE_SIZE - 6);
77709+ buflen = (int)(ret - buf);
77710+ if (buflen >= 5)
77711+ prepend(&ret, &buflen, "/proc", 5);
77712+ else
77713+ ret = strcpy(buf, "<path too long>");
77714+ write_sequnlock(&rename_lock);
77715+ read_sequnlock_excl(&mount_lock);
77716+ return ret;
77717+}
77718+
77719+char *
77720+gr_to_filename_nolock(const struct dentry *dentry, const struct vfsmount *mnt)
77721+{
77722+ return __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
77723+ PAGE_SIZE);
77724+}
77725+
77726+char *
77727+gr_to_filename(const struct dentry *dentry, const struct vfsmount *mnt)
77728+{
77729+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
77730+ PAGE_SIZE);
77731+}
77732+
77733+char *
77734+gr_to_filename1(const struct dentry *dentry, const struct vfsmount *mnt)
77735+{
77736+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[1], smp_processor_id()),
77737+ PAGE_SIZE);
77738+}
77739+
77740+char *
77741+gr_to_filename2(const struct dentry *dentry, const struct vfsmount *mnt)
77742+{
77743+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[2], smp_processor_id()),
77744+ PAGE_SIZE);
77745+}
77746+
77747+char *
77748+gr_to_filename3(const struct dentry *dentry, const struct vfsmount *mnt)
77749+{
77750+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[3], smp_processor_id()),
77751+ PAGE_SIZE);
77752+}
77753+
77754+__u32
77755+to_gr_audit(const __u32 reqmode)
77756+{
77757+ /* masks off auditable permission flags, then shifts them to create
77758+ auditing flags, and adds the special case of append auditing if
77759+ we're requesting write */
77760+ return (((reqmode & ~GR_AUDITS) << 10) | ((reqmode & GR_WRITE) ? GR_AUDIT_APPEND : 0));
77761+}
77762+
77763+struct acl_role_label *
77764+__lookup_acl_role_label(const struct gr_policy_state *state, const struct task_struct *task, const uid_t uid,
77765+ const gid_t gid)
77766+{
77767+ unsigned int index = gr_rhash(uid, GR_ROLE_USER, state->acl_role_set.r_size);
77768+ struct acl_role_label *match;
77769+ struct role_allowed_ip *ipp;
77770+ unsigned int x;
77771+ u32 curr_ip = task->signal->saved_ip;
77772+
77773+ match = state->acl_role_set.r_hash[index];
77774+
77775+ while (match) {
77776+ if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_USER)) == (GR_ROLE_DOMAIN | GR_ROLE_USER)) {
77777+ for (x = 0; x < match->domain_child_num; x++) {
77778+ if (match->domain_children[x] == uid)
77779+ goto found;
77780+ }
77781+ } else if (match->uidgid == uid && match->roletype & GR_ROLE_USER)
77782+ break;
77783+ match = match->next;
77784+ }
77785+found:
77786+ if (match == NULL) {
77787+ try_group:
77788+ index = gr_rhash(gid, GR_ROLE_GROUP, state->acl_role_set.r_size);
77789+ match = state->acl_role_set.r_hash[index];
77790+
77791+ while (match) {
77792+ if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) == (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) {
77793+ for (x = 0; x < match->domain_child_num; x++) {
77794+ if (match->domain_children[x] == gid)
77795+ goto found2;
77796+ }
77797+ } else if (match->uidgid == gid && match->roletype & GR_ROLE_GROUP)
77798+ break;
77799+ match = match->next;
77800+ }
77801+found2:
77802+ if (match == NULL)
77803+ match = state->default_role;
77804+ if (match->allowed_ips == NULL)
77805+ return match;
77806+ else {
77807+ for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
77808+ if (likely
77809+ ((ntohl(curr_ip) & ipp->netmask) ==
77810+ (ntohl(ipp->addr) & ipp->netmask)))
77811+ return match;
77812+ }
77813+ match = state->default_role;
77814+ }
77815+ } else if (match->allowed_ips == NULL) {
77816+ return match;
77817+ } else {
77818+ for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
77819+ if (likely
77820+ ((ntohl(curr_ip) & ipp->netmask) ==
77821+ (ntohl(ipp->addr) & ipp->netmask)))
77822+ return match;
77823+ }
77824+ goto try_group;
77825+ }
77826+
77827+ return match;
77828+}
77829+
77830+static struct acl_role_label *
77831+lookup_acl_role_label(const struct task_struct *task, const uid_t uid,
77832+ const gid_t gid)
77833+{
77834+ return __lookup_acl_role_label(&running_polstate, task, uid, gid);
77835+}
77836+
77837+struct acl_subject_label *
77838+lookup_acl_subj_label(const u64 ino, const dev_t dev,
77839+ const struct acl_role_label *role)
77840+{
77841+ unsigned int index = gr_fhash(ino, dev, role->subj_hash_size);
77842+ struct acl_subject_label *match;
77843+
77844+ match = role->subj_hash[index];
77845+
77846+ while (match && (match->inode != ino || match->device != dev ||
77847+ (match->mode & GR_DELETED))) {
77848+ match = match->next;
77849+ }
77850+
77851+ if (match && !(match->mode & GR_DELETED))
77852+ return match;
77853+ else
77854+ return NULL;
77855+}
77856+
77857+struct acl_subject_label *
77858+lookup_acl_subj_label_deleted(const u64 ino, const dev_t dev,
77859+ const struct acl_role_label *role)
77860+{
77861+ unsigned int index = gr_fhash(ino, dev, role->subj_hash_size);
77862+ struct acl_subject_label *match;
77863+
77864+ match = role->subj_hash[index];
77865+
77866+ while (match && (match->inode != ino || match->device != dev ||
77867+ !(match->mode & GR_DELETED))) {
77868+ match = match->next;
77869+ }
77870+
77871+ if (match && (match->mode & GR_DELETED))
77872+ return match;
77873+ else
77874+ return NULL;
77875+}
77876+
77877+static struct acl_object_label *
77878+lookup_acl_obj_label(const u64 ino, const dev_t dev,
77879+ const struct acl_subject_label *subj)
77880+{
77881+ unsigned int index = gr_fhash(ino, dev, subj->obj_hash_size);
77882+ struct acl_object_label *match;
77883+
77884+ match = subj->obj_hash[index];
77885+
77886+ while (match && (match->inode != ino || match->device != dev ||
77887+ (match->mode & GR_DELETED))) {
77888+ match = match->next;
77889+ }
77890+
77891+ if (match && !(match->mode & GR_DELETED))
77892+ return match;
77893+ else
77894+ return NULL;
77895+}
77896+
77897+static struct acl_object_label *
77898+lookup_acl_obj_label_create(const u64 ino, const dev_t dev,
77899+ const struct acl_subject_label *subj)
77900+{
77901+ unsigned int index = gr_fhash(ino, dev, subj->obj_hash_size);
77902+ struct acl_object_label *match;
77903+
77904+ match = subj->obj_hash[index];
77905+
77906+ while (match && (match->inode != ino || match->device != dev ||
77907+ !(match->mode & GR_DELETED))) {
77908+ match = match->next;
77909+ }
77910+
77911+ if (match && (match->mode & GR_DELETED))
77912+ return match;
77913+
77914+ match = subj->obj_hash[index];
77915+
77916+ while (match && (match->inode != ino || match->device != dev ||
77917+ (match->mode & GR_DELETED))) {
77918+ match = match->next;
77919+ }
77920+
77921+ if (match && !(match->mode & GR_DELETED))
77922+ return match;
77923+ else
77924+ return NULL;
77925+}
77926+
77927+struct name_entry *
77928+__lookup_name_entry(const struct gr_policy_state *state, const char *name)
77929+{
77930+ unsigned int len = strlen(name);
77931+ unsigned int key = full_name_hash(name, len);
77932+ unsigned int index = key % state->name_set.n_size;
77933+ struct name_entry *match;
77934+
77935+ match = state->name_set.n_hash[index];
77936+
77937+ while (match && (match->key != key || !gr_streq(match->name, name, match->len, len)))
77938+ match = match->next;
77939+
77940+ return match;
77941+}
77942+
77943+static struct name_entry *
77944+lookup_name_entry(const char *name)
77945+{
77946+ return __lookup_name_entry(&running_polstate, name);
77947+}
77948+
77949+static struct name_entry *
77950+lookup_name_entry_create(const char *name)
77951+{
77952+ unsigned int len = strlen(name);
77953+ unsigned int key = full_name_hash(name, len);
77954+ unsigned int index = key % running_polstate.name_set.n_size;
77955+ struct name_entry *match;
77956+
77957+ match = running_polstate.name_set.n_hash[index];
77958+
77959+ while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
77960+ !match->deleted))
77961+ match = match->next;
77962+
77963+ if (match && match->deleted)
77964+ return match;
77965+
77966+ match = running_polstate.name_set.n_hash[index];
77967+
77968+ while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
77969+ match->deleted))
77970+ match = match->next;
77971+
77972+ if (match && !match->deleted)
77973+ return match;
77974+ else
77975+ return NULL;
77976+}
77977+
77978+static struct inodev_entry *
77979+lookup_inodev_entry(const u64 ino, const dev_t dev)
77980+{
77981+ unsigned int index = gr_fhash(ino, dev, running_polstate.inodev_set.i_size);
77982+ struct inodev_entry *match;
77983+
77984+ match = running_polstate.inodev_set.i_hash[index];
77985+
77986+ while (match && (match->nentry->inode != ino || match->nentry->device != dev))
77987+ match = match->next;
77988+
77989+ return match;
77990+}
77991+
77992+void
77993+__insert_inodev_entry(const struct gr_policy_state *state, struct inodev_entry *entry)
77994+{
77995+ unsigned int index = gr_fhash(entry->nentry->inode, entry->nentry->device,
77996+ state->inodev_set.i_size);
77997+ struct inodev_entry **curr;
77998+
77999+ entry->prev = NULL;
78000+
78001+ curr = &state->inodev_set.i_hash[index];
78002+ if (*curr != NULL)
78003+ (*curr)->prev = entry;
78004+
78005+ entry->next = *curr;
78006+ *curr = entry;
78007+
78008+ return;
78009+}
78010+
78011+static void
78012+insert_inodev_entry(struct inodev_entry *entry)
78013+{
78014+ __insert_inodev_entry(&running_polstate, entry);
78015+}
78016+
78017+void
78018+insert_acl_obj_label(struct acl_object_label *obj,
78019+ struct acl_subject_label *subj)
78020+{
78021+ unsigned int index =
78022+ gr_fhash(obj->inode, obj->device, subj->obj_hash_size);
78023+ struct acl_object_label **curr;
78024+
78025+ obj->prev = NULL;
78026+
78027+ curr = &subj->obj_hash[index];
78028+ if (*curr != NULL)
78029+ (*curr)->prev = obj;
78030+
78031+ obj->next = *curr;
78032+ *curr = obj;
78033+
78034+ return;
78035+}
78036+
78037+void
78038+insert_acl_subj_label(struct acl_subject_label *obj,
78039+ struct acl_role_label *role)
78040+{
78041+ unsigned int index = gr_fhash(obj->inode, obj->device, role->subj_hash_size);
78042+ struct acl_subject_label **curr;
78043+
78044+ obj->prev = NULL;
78045+
78046+ curr = &role->subj_hash[index];
78047+ if (*curr != NULL)
78048+ (*curr)->prev = obj;
78049+
78050+ obj->next = *curr;
78051+ *curr = obj;
78052+
78053+ return;
78054+}
78055+
78056+/* derived from glibc fnmatch() 0: match, 1: no match*/
78057+
78058+static int
78059+glob_match(const char *p, const char *n)
78060+{
78061+ char c;
78062+
78063+ while ((c = *p++) != '\0') {
78064+ switch (c) {
78065+ case '?':
78066+ if (*n == '\0')
78067+ return 1;
78068+ else if (*n == '/')
78069+ return 1;
78070+ break;
78071+ case '\\':
78072+ if (*n != c)
78073+ return 1;
78074+ break;
78075+ case '*':
78076+ for (c = *p++; c == '?' || c == '*'; c = *p++) {
78077+ if (*n == '/')
78078+ return 1;
78079+ else if (c == '?') {
78080+ if (*n == '\0')
78081+ return 1;
78082+ else
78083+ ++n;
78084+ }
78085+ }
78086+ if (c == '\0') {
78087+ return 0;
78088+ } else {
78089+ const char *endp;
78090+
78091+ if ((endp = strchr(n, '/')) == NULL)
78092+ endp = n + strlen(n);
78093+
78094+ if (c == '[') {
78095+ for (--p; n < endp; ++n)
78096+ if (!glob_match(p, n))
78097+ return 0;
78098+ } else if (c == '/') {
78099+ while (*n != '\0' && *n != '/')
78100+ ++n;
78101+ if (*n == '/' && !glob_match(p, n + 1))
78102+ return 0;
78103+ } else {
78104+ for (--p; n < endp; ++n)
78105+ if (*n == c && !glob_match(p, n))
78106+ return 0;
78107+ }
78108+
78109+ return 1;
78110+ }
78111+ case '[':
78112+ {
78113+ int not;
78114+ char cold;
78115+
78116+ if (*n == '\0' || *n == '/')
78117+ return 1;
78118+
78119+ not = (*p == '!' || *p == '^');
78120+ if (not)
78121+ ++p;
78122+
78123+ c = *p++;
78124+ for (;;) {
78125+ unsigned char fn = (unsigned char)*n;
78126+
78127+ if (c == '\0')
78128+ return 1;
78129+ else {
78130+ if (c == fn)
78131+ goto matched;
78132+ cold = c;
78133+ c = *p++;
78134+
78135+ if (c == '-' && *p != ']') {
78136+ unsigned char cend = *p++;
78137+
78138+ if (cend == '\0')
78139+ return 1;
78140+
78141+ if (cold <= fn && fn <= cend)
78142+ goto matched;
78143+
78144+ c = *p++;
78145+ }
78146+ }
78147+
78148+ if (c == ']')
78149+ break;
78150+ }
78151+ if (!not)
78152+ return 1;
78153+ break;
78154+ matched:
78155+ while (c != ']') {
78156+ if (c == '\0')
78157+ return 1;
78158+
78159+ c = *p++;
78160+ }
78161+ if (not)
78162+ return 1;
78163+ }
78164+ break;
78165+ default:
78166+ if (c != *n)
78167+ return 1;
78168+ }
78169+
78170+ ++n;
78171+ }
78172+
78173+ if (*n == '\0')
78174+ return 0;
78175+
78176+ if (*n == '/')
78177+ return 0;
78178+
78179+ return 1;
78180+}
78181+
78182+static struct acl_object_label *
78183+chk_glob_label(struct acl_object_label *globbed,
78184+ const struct dentry *dentry, const struct vfsmount *mnt, char **path)
78185+{
78186+ struct acl_object_label *tmp;
78187+
78188+ if (*path == NULL)
78189+ *path = gr_to_filename_nolock(dentry, mnt);
78190+
78191+ tmp = globbed;
78192+
78193+ while (tmp) {
78194+ if (!glob_match(tmp->filename, *path))
78195+ return tmp;
78196+ tmp = tmp->next;
78197+ }
78198+
78199+ return NULL;
78200+}
78201+
78202+static struct acl_object_label *
78203+__full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
78204+ const u64 curr_ino, const dev_t curr_dev,
78205+ const struct acl_subject_label *subj, char **path, const int checkglob)
78206+{
78207+ struct acl_subject_label *tmpsubj;
78208+ struct acl_object_label *retval;
78209+ struct acl_object_label *retval2;
78210+
78211+ tmpsubj = (struct acl_subject_label *) subj;
78212+ read_lock(&gr_inode_lock);
78213+ do {
78214+ retval = lookup_acl_obj_label(curr_ino, curr_dev, tmpsubj);
78215+ if (retval) {
78216+ if (checkglob && retval->globbed) {
78217+ retval2 = chk_glob_label(retval->globbed, orig_dentry, orig_mnt, path);
78218+ if (retval2)
78219+ retval = retval2;
78220+ }
78221+ break;
78222+ }
78223+ } while ((tmpsubj = tmpsubj->parent_subject));
78224+ read_unlock(&gr_inode_lock);
78225+
78226+ return retval;
78227+}
78228+
78229+static struct acl_object_label *
78230+full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
78231+ struct dentry *curr_dentry,
78232+ const struct acl_subject_label *subj, char **path, const int checkglob)
78233+{
78234+ int newglob = checkglob;
78235+ u64 inode;
78236+ dev_t device;
78237+
78238+ /* if we aren't checking a subdirectory of the original path yet, don't do glob checking
78239+ as we don't want a / * rule to match instead of the / object
78240+ don't do this for create lookups that call this function though, since they're looking up
78241+ on the parent and thus need globbing checks on all paths
78242+ */
78243+ if (orig_dentry == curr_dentry && newglob != GR_CREATE_GLOB)
78244+ newglob = GR_NO_GLOB;
78245+
78246+ spin_lock(&curr_dentry->d_lock);
78247+ inode = __get_ino(curr_dentry);
78248+ device = __get_dev(curr_dentry);
78249+ spin_unlock(&curr_dentry->d_lock);
78250+
78251+ return __full_lookup(orig_dentry, orig_mnt, inode, device, subj, path, newglob);
78252+}
78253+
78254+#ifdef CONFIG_HUGETLBFS
78255+static inline bool
78256+is_hugetlbfs_mnt(const struct vfsmount *mnt)
78257+{
78258+ int i;
78259+ for (i = 0; i < HUGE_MAX_HSTATE; i++) {
78260+ if (unlikely(hugetlbfs_vfsmount[i] == mnt))
78261+ return true;
78262+ }
78263+
78264+ return false;
78265+}
78266+#endif
78267+
78268+static struct acl_object_label *
78269+__chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
78270+ const struct acl_subject_label *subj, char *path, const int checkglob)
78271+{
78272+ struct dentry *dentry = (struct dentry *) l_dentry;
78273+ struct vfsmount *mnt = (struct vfsmount *) l_mnt;
78274+ struct mount *real_mnt = real_mount(mnt);
78275+ struct acl_object_label *retval;
78276+ struct dentry *parent;
78277+
78278+ read_seqlock_excl(&mount_lock);
78279+ write_seqlock(&rename_lock);
78280+
78281+ if (unlikely((mnt == shm_mnt && dentry->d_inode->i_nlink == 0) || mnt == pipe_mnt ||
78282+#ifdef CONFIG_NET
78283+ mnt == sock_mnt ||
78284+#endif
78285+#ifdef CONFIG_HUGETLBFS
78286+ (is_hugetlbfs_mnt(mnt) && dentry->d_inode->i_nlink == 0) ||
78287+#endif
78288+ /* ignore Eric Biederman */
78289+ IS_PRIVATE(l_dentry->d_inode))) {
78290+ retval = (subj->mode & GR_SHMEXEC) ? fakefs_obj_rwx : fakefs_obj_rw;
78291+ goto out;
78292+ }
78293+
78294+ for (;;) {
78295+ if (dentry == gr_real_root.dentry && mnt == gr_real_root.mnt)
78296+ break;
78297+
78298+ if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
78299+ if (!mnt_has_parent(real_mnt))
78300+ break;
78301+
78302+ retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
78303+ if (retval != NULL)
78304+ goto out;
78305+
78306+ dentry = real_mnt->mnt_mountpoint;
78307+ real_mnt = real_mnt->mnt_parent;
78308+ mnt = &real_mnt->mnt;
78309+ continue;
78310+ }
78311+
78312+ parent = dentry->d_parent;
78313+ retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
78314+ if (retval != NULL)
78315+ goto out;
78316+
78317+ dentry = parent;
78318+ }
78319+
78320+ retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
78321+
78322+ /* gr_real_root is pinned so we don't have to hold a reference */
78323+ if (retval == NULL)
78324+ retval = full_lookup(l_dentry, l_mnt, gr_real_root.dentry, subj, &path, checkglob);
78325+out:
78326+ write_sequnlock(&rename_lock);
78327+ read_sequnlock_excl(&mount_lock);
78328+
78329+ BUG_ON(retval == NULL);
78330+
78331+ return retval;
78332+}
78333+
78334+static struct acl_object_label *
78335+chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
78336+ const struct acl_subject_label *subj)
78337+{
78338+ char *path = NULL;
78339+ return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_REG_GLOB);
78340+}
78341+
78342+static struct acl_object_label *
78343+chk_obj_label_noglob(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
78344+ const struct acl_subject_label *subj)
78345+{
78346+ char *path = NULL;
78347+ return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_NO_GLOB);
78348+}
78349+
78350+static struct acl_object_label *
78351+chk_obj_create_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
78352+ const struct acl_subject_label *subj, char *path)
78353+{
78354+ return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_CREATE_GLOB);
78355+}
78356+
78357+struct acl_subject_label *
78358+chk_subj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
78359+ const struct acl_role_label *role)
78360+{
78361+ struct dentry *dentry = (struct dentry *) l_dentry;
78362+ struct vfsmount *mnt = (struct vfsmount *) l_mnt;
78363+ struct mount *real_mnt = real_mount(mnt);
78364+ struct acl_subject_label *retval;
78365+ struct dentry *parent;
78366+
78367+ read_seqlock_excl(&mount_lock);
78368+ write_seqlock(&rename_lock);
78369+
78370+ for (;;) {
78371+ if (dentry == gr_real_root.dentry && mnt == gr_real_root.mnt)
78372+ break;
78373+ if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
78374+ if (!mnt_has_parent(real_mnt))
78375+ break;
78376+
78377+ spin_lock(&dentry->d_lock);
78378+ read_lock(&gr_inode_lock);
78379+ retval =
78380+ lookup_acl_subj_label(__get_ino(dentry),
78381+ __get_dev(dentry), role);
78382+ read_unlock(&gr_inode_lock);
78383+ spin_unlock(&dentry->d_lock);
78384+ if (retval != NULL)
78385+ goto out;
78386+
78387+ dentry = real_mnt->mnt_mountpoint;
78388+ real_mnt = real_mnt->mnt_parent;
78389+ mnt = &real_mnt->mnt;
78390+ continue;
78391+ }
78392+
78393+ spin_lock(&dentry->d_lock);
78394+ read_lock(&gr_inode_lock);
78395+ retval = lookup_acl_subj_label(__get_ino(dentry),
78396+ __get_dev(dentry), role);
78397+ read_unlock(&gr_inode_lock);
78398+ parent = dentry->d_parent;
78399+ spin_unlock(&dentry->d_lock);
78400+
78401+ if (retval != NULL)
78402+ goto out;
78403+
78404+ dentry = parent;
78405+ }
78406+
78407+ spin_lock(&dentry->d_lock);
78408+ read_lock(&gr_inode_lock);
78409+ retval = lookup_acl_subj_label(__get_ino(dentry),
78410+ __get_dev(dentry), role);
78411+ read_unlock(&gr_inode_lock);
78412+ spin_unlock(&dentry->d_lock);
78413+
78414+ if (unlikely(retval == NULL)) {
78415+ /* gr_real_root is pinned, we don't need to hold a reference */
78416+ read_lock(&gr_inode_lock);
78417+ retval = lookup_acl_subj_label(__get_ino(gr_real_root.dentry),
78418+ __get_dev(gr_real_root.dentry), role);
78419+ read_unlock(&gr_inode_lock);
78420+ }
78421+out:
78422+ write_sequnlock(&rename_lock);
78423+ read_sequnlock_excl(&mount_lock);
78424+
78425+ BUG_ON(retval == NULL);
78426+
78427+ return retval;
78428+}
78429+
78430+void
78431+assign_special_role(const char *rolename)
78432+{
78433+ struct acl_object_label *obj;
78434+ struct acl_role_label *r;
78435+ struct acl_role_label *assigned = NULL;
78436+ struct task_struct *tsk;
78437+ struct file *filp;
78438+
78439+ FOR_EACH_ROLE_START(r)
78440+ if (!strcmp(rolename, r->rolename) &&
78441+ (r->roletype & GR_ROLE_SPECIAL)) {
78442+ assigned = r;
78443+ break;
78444+ }
78445+ FOR_EACH_ROLE_END(r)
78446+
78447+ if (!assigned)
78448+ return;
78449+
78450+ read_lock(&tasklist_lock);
78451+ read_lock(&grsec_exec_file_lock);
78452+
78453+ tsk = current->real_parent;
78454+ if (tsk == NULL)
78455+ goto out_unlock;
78456+
78457+ filp = tsk->exec_file;
78458+ if (filp == NULL)
78459+ goto out_unlock;
78460+
78461+ tsk->is_writable = 0;
78462+ tsk->inherited = 0;
78463+
78464+ tsk->acl_sp_role = 1;
78465+ tsk->acl_role_id = ++acl_sp_role_value;
78466+ tsk->role = assigned;
78467+ tsk->acl = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role);
78468+
78469+ /* ignore additional mmap checks for processes that are writable
78470+ by the default ACL */
78471+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, running_polstate.default_role->root_label);
78472+ if (unlikely(obj->mode & GR_WRITE))
78473+ tsk->is_writable = 1;
78474+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role->root_label);
78475+ if (unlikely(obj->mode & GR_WRITE))
78476+ tsk->is_writable = 1;
78477+
78478+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
78479+ printk(KERN_ALERT "Assigning special role:%s subject:%s to process (%s:%d)\n", tsk->role->rolename,
78480+ tsk->acl->filename, tsk->comm, task_pid_nr(tsk));
78481+#endif
78482+
78483+out_unlock:
78484+ read_unlock(&grsec_exec_file_lock);
78485+ read_unlock(&tasklist_lock);
78486+ return;
78487+}
78488+
78489+
78490+static void
78491+gr_log_learn(const struct dentry *dentry, const struct vfsmount *mnt, const __u32 mode)
78492+{
78493+ struct task_struct *task = current;
78494+ const struct cred *cred = current_cred();
78495+
78496+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename, task->role->roletype,
78497+ GR_GLOBAL_UID(cred->uid), GR_GLOBAL_GID(cred->gid), task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
78498+ task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
78499+ 1UL, 1UL, gr_to_filename(dentry, mnt), (unsigned long) mode, &task->signal->saved_ip);
78500+
78501+ return;
78502+}
78503+
78504+static void
78505+gr_log_learn_uid_change(const kuid_t real, const kuid_t effective, const kuid_t fs)
78506+{
78507+ struct task_struct *task = current;
78508+ const struct cred *cred = current_cred();
78509+
78510+ security_learn(GR_ID_LEARN_MSG, task->role->rolename, task->role->roletype,
78511+ GR_GLOBAL_UID(cred->uid), GR_GLOBAL_GID(cred->gid), task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
78512+ task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
78513+ 'u', GR_GLOBAL_UID(real), GR_GLOBAL_UID(effective), GR_GLOBAL_UID(fs), &task->signal->saved_ip);
78514+
78515+ return;
78516+}
78517+
78518+static void
78519+gr_log_learn_gid_change(const kgid_t real, const kgid_t effective, const kgid_t fs)
78520+{
78521+ struct task_struct *task = current;
78522+ const struct cred *cred = current_cred();
78523+
78524+ security_learn(GR_ID_LEARN_MSG, task->role->rolename, task->role->roletype,
78525+ GR_GLOBAL_UID(cred->uid), GR_GLOBAL_GID(cred->gid), task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
78526+ task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
78527+ 'g', GR_GLOBAL_GID(real), GR_GLOBAL_GID(effective), GR_GLOBAL_GID(fs), &task->signal->saved_ip);
78528+
78529+ return;
78530+}
78531+
78532+static void
78533+gr_set_proc_res(struct task_struct *task)
78534+{
78535+ struct acl_subject_label *proc;
78536+ unsigned short i;
78537+
78538+ proc = task->acl;
78539+
78540+ if (proc->mode & (GR_LEARN | GR_INHERITLEARN))
78541+ return;
78542+
78543+ for (i = 0; i < RLIM_NLIMITS; i++) {
78544+ unsigned long rlim_cur, rlim_max;
78545+
78546+ if (!(proc->resmask & (1U << i)))
78547+ continue;
78548+
78549+ rlim_cur = proc->res[i].rlim_cur;
78550+ rlim_max = proc->res[i].rlim_max;
78551+
78552+ if (i == RLIMIT_NOFILE) {
78553+ unsigned long saved_sysctl_nr_open = sysctl_nr_open;
78554+ if (rlim_cur > saved_sysctl_nr_open)
78555+ rlim_cur = saved_sysctl_nr_open;
78556+ if (rlim_max > saved_sysctl_nr_open)
78557+ rlim_max = saved_sysctl_nr_open;
78558+ }
78559+
78560+ task->signal->rlim[i].rlim_cur = rlim_cur;
78561+ task->signal->rlim[i].rlim_max = rlim_max;
78562+
78563+ if (i == RLIMIT_CPU)
78564+ update_rlimit_cpu(task, rlim_cur);
78565+ }
78566+
78567+ return;
78568+}
78569+
78570+/* both of the below must be called with
78571+ rcu_read_lock();
78572+ read_lock(&tasklist_lock);
78573+ read_lock(&grsec_exec_file_lock);
78574+ except in the case of gr_set_role_label() (for __gr_get_subject_for_task)
78575+*/
78576+
78577+struct acl_subject_label *__gr_get_subject_for_task(const struct gr_policy_state *state, struct task_struct *task, const char *filename, int fallback)
78578+{
78579+ char *tmpname;
78580+ struct acl_subject_label *tmpsubj;
78581+ struct file *filp;
78582+ struct name_entry *nmatch;
78583+
78584+ filp = task->exec_file;
78585+ if (filp == NULL)
78586+ return NULL;
78587+
78588+ /* the following is to apply the correct subject
78589+ on binaries running when the RBAC system
78590+ is enabled, when the binaries have been
78591+ replaced or deleted since their execution
78592+ -----
78593+ when the RBAC system starts, the inode/dev
78594+ from exec_file will be one the RBAC system
78595+ is unaware of. It only knows the inode/dev
78596+ of the present file on disk, or the absence
78597+ of it.
78598+ */
78599+
78600+ if (filename)
78601+ nmatch = __lookup_name_entry(state, filename);
78602+ else {
78603+ preempt_disable();
78604+ tmpname = gr_to_filename_rbac(filp->f_path.dentry, filp->f_path.mnt);
78605+
78606+ nmatch = __lookup_name_entry(state, tmpname);
78607+ preempt_enable();
78608+ }
78609+ tmpsubj = NULL;
78610+ if (nmatch) {
78611+ if (nmatch->deleted)
78612+ tmpsubj = lookup_acl_subj_label_deleted(nmatch->inode, nmatch->device, task->role);
78613+ else
78614+ tmpsubj = lookup_acl_subj_label(nmatch->inode, nmatch->device, task->role);
78615+ }
78616+ /* this also works for the reload case -- if we don't match a potentially inherited subject
78617+ then we fall back to a normal lookup based on the binary's ino/dev
78618+ */
78619+ if (tmpsubj == NULL && fallback)
78620+ tmpsubj = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, task->role);
78621+
78622+ return tmpsubj;
78623+}
78624+
78625+static struct acl_subject_label *gr_get_subject_for_task(struct task_struct *task, const char *filename, int fallback)
78626+{
78627+ return __gr_get_subject_for_task(&running_polstate, task, filename, fallback);
78628+}
78629+
78630+void __gr_apply_subject_to_task(const struct gr_policy_state *state, struct task_struct *task, struct acl_subject_label *subj)
78631+{
78632+ struct acl_object_label *obj;
78633+ struct file *filp;
78634+
78635+ filp = task->exec_file;
78636+
78637+ task->acl = subj;
78638+ task->is_writable = 0;
78639+ /* ignore additional mmap checks for processes that are writable
78640+ by the default ACL */
78641+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, state->default_role->root_label);
78642+ if (unlikely(obj->mode & GR_WRITE))
78643+ task->is_writable = 1;
78644+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
78645+ if (unlikely(obj->mode & GR_WRITE))
78646+ task->is_writable = 1;
78647+
78648+ gr_set_proc_res(task);
78649+
78650+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
78651+ printk(KERN_ALERT "gr_set_acls for (%s:%d): role:%s, subject:%s\n", task->comm, task_pid_nr(task), task->role->rolename, task->acl->filename);
78652+#endif
78653+}
78654+
78655+static void gr_apply_subject_to_task(struct task_struct *task, struct acl_subject_label *subj)
78656+{
78657+ __gr_apply_subject_to_task(&running_polstate, task, subj);
78658+}
78659+
78660+__u32
78661+gr_search_file(const struct dentry * dentry, const __u32 mode,
78662+ const struct vfsmount * mnt)
78663+{
78664+ __u32 retval = mode;
78665+ struct acl_subject_label *curracl;
78666+ struct acl_object_label *currobj;
78667+
78668+ if (unlikely(!(gr_status & GR_READY)))
78669+ return (mode & ~GR_AUDITS);
78670+
78671+ curracl = current->acl;
78672+
78673+ currobj = chk_obj_label(dentry, mnt, curracl);
78674+ retval = currobj->mode & mode;
78675+
78676+ /* if we're opening a specified transfer file for writing
78677+ (e.g. /dev/initctl), then transfer our role to init
78678+ */
78679+ if (unlikely(currobj->mode & GR_INIT_TRANSFER && retval & GR_WRITE &&
78680+ current->role->roletype & GR_ROLE_PERSIST)) {
78681+ struct task_struct *task = init_pid_ns.child_reaper;
78682+
78683+ if (task->role != current->role) {
78684+ struct acl_subject_label *subj;
78685+
78686+ task->acl_sp_role = 0;
78687+ task->acl_role_id = current->acl_role_id;
78688+ task->role = current->role;
78689+ rcu_read_lock();
78690+ read_lock(&grsec_exec_file_lock);
78691+ subj = gr_get_subject_for_task(task, NULL, 1);
78692+ gr_apply_subject_to_task(task, subj);
78693+ read_unlock(&grsec_exec_file_lock);
78694+ rcu_read_unlock();
78695+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_INIT_TRANSFER_MSG);
78696+ }
78697+ }
78698+
78699+ if (unlikely
78700+ ((curracl->mode & (GR_LEARN | GR_INHERITLEARN)) && !(mode & GR_NOPTRACE)
78701+ && (retval != (mode & ~(GR_AUDITS | GR_SUPPRESS))))) {
78702+ __u32 new_mode = mode;
78703+
78704+ new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
78705+
78706+ retval = new_mode;
78707+
78708+ if (new_mode & GR_EXEC && curracl->mode & GR_INHERITLEARN)
78709+ new_mode |= GR_INHERIT;
78710+
78711+ if (!(mode & GR_NOLEARN))
78712+ gr_log_learn(dentry, mnt, new_mode);
78713+ }
78714+
78715+ return retval;
78716+}
78717+
78718+struct acl_object_label *gr_get_create_object(const struct dentry *new_dentry,
78719+ const struct dentry *parent,
78720+ const struct vfsmount *mnt)
78721+{
78722+ struct name_entry *match;
78723+ struct acl_object_label *matchpo;
78724+ struct acl_subject_label *curracl;
78725+ char *path;
78726+
78727+ if (unlikely(!(gr_status & GR_READY)))
78728+ return NULL;
78729+
78730+ preempt_disable();
78731+ path = gr_to_filename_rbac(new_dentry, mnt);
78732+ match = lookup_name_entry_create(path);
78733+
78734+ curracl = current->acl;
78735+
78736+ if (match) {
78737+ read_lock(&gr_inode_lock);
78738+ matchpo = lookup_acl_obj_label_create(match->inode, match->device, curracl);
78739+ read_unlock(&gr_inode_lock);
78740+
78741+ if (matchpo) {
78742+ preempt_enable();
78743+ return matchpo;
78744+ }
78745+ }
78746+
78747+ // lookup parent
78748+
78749+ matchpo = chk_obj_create_label(parent, mnt, curracl, path);
78750+
78751+ preempt_enable();
78752+ return matchpo;
78753+}
78754+
78755+__u32
78756+gr_check_create(const struct dentry * new_dentry, const struct dentry * parent,
78757+ const struct vfsmount * mnt, const __u32 mode)
78758+{
78759+ struct acl_object_label *matchpo;
78760+ __u32 retval;
78761+
78762+ if (unlikely(!(gr_status & GR_READY)))
78763+ return (mode & ~GR_AUDITS);
78764+
78765+ matchpo = gr_get_create_object(new_dentry, parent, mnt);
78766+
78767+ retval = matchpo->mode & mode;
78768+
78769+ if ((retval != (mode & ~(GR_AUDITS | GR_SUPPRESS)))
78770+ && (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))) {
78771+ __u32 new_mode = mode;
78772+
78773+ new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
78774+
78775+ gr_log_learn(new_dentry, mnt, new_mode);
78776+ return new_mode;
78777+ }
78778+
78779+ return retval;
78780+}
78781+
78782+__u32
78783+gr_check_link(const struct dentry * new_dentry,
78784+ const struct dentry * parent_dentry,
78785+ const struct vfsmount * parent_mnt,
78786+ const struct dentry * old_dentry, const struct vfsmount * old_mnt)
78787+{
78788+ struct acl_object_label *obj;
78789+ __u32 oldmode, newmode;
78790+ __u32 needmode;
78791+ __u32 checkmodes = GR_FIND | GR_APPEND | GR_WRITE | GR_EXEC | GR_SETID | GR_READ |
78792+ GR_DELETE | GR_INHERIT;
78793+
78794+ if (unlikely(!(gr_status & GR_READY)))
78795+ return (GR_CREATE | GR_LINK);
78796+
78797+ obj = chk_obj_label(old_dentry, old_mnt, current->acl);
78798+ oldmode = obj->mode;
78799+
78800+ obj = gr_get_create_object(new_dentry, parent_dentry, parent_mnt);
78801+ newmode = obj->mode;
78802+
78803+ needmode = newmode & checkmodes;
78804+
78805+ // old name for hardlink must have at least the permissions of the new name
78806+ if ((oldmode & needmode) != needmode)
78807+ goto bad;
78808+
78809+ // if old name had restrictions/auditing, make sure the new name does as well
78810+ needmode = oldmode & (GR_NOPTRACE | GR_PTRACERD | GR_INHERIT | GR_AUDITS);
78811+
78812+ // don't allow hardlinking of suid/sgid/fcapped files without permission
78813+ if (is_privileged_binary(old_dentry))
78814+ needmode |= GR_SETID;
78815+
78816+ if ((newmode & needmode) != needmode)
78817+ goto bad;
78818+
78819+ // enforce minimum permissions
78820+ if ((newmode & (GR_CREATE | GR_LINK)) == (GR_CREATE | GR_LINK))
78821+ return newmode;
78822+bad:
78823+ needmode = oldmode;
78824+ if (is_privileged_binary(old_dentry))
78825+ needmode |= GR_SETID;
78826+
78827+ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN)) {
78828+ gr_log_learn(old_dentry, old_mnt, needmode | GR_CREATE | GR_LINK);
78829+ return (GR_CREATE | GR_LINK);
78830+ } else if (newmode & GR_SUPPRESS)
78831+ return GR_SUPPRESS;
78832+ else
78833+ return 0;
78834+}
78835+
78836+int
78837+gr_check_hidden_task(const struct task_struct *task)
78838+{
78839+ if (unlikely(!(gr_status & GR_READY)))
78840+ return 0;
78841+
78842+ if (!(task->acl->mode & GR_PROCFIND) && !(current->acl->mode & GR_VIEW))
78843+ return 1;
78844+
78845+ return 0;
78846+}
78847+
78848+int
78849+gr_check_protected_task(const struct task_struct *task)
78850+{
78851+ if (unlikely(!(gr_status & GR_READY) || !task))
78852+ return 0;
78853+
78854+ if ((task->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
78855+ task->acl != current->acl)
78856+ return 1;
78857+
78858+ return 0;
78859+}
78860+
78861+int
78862+gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
78863+{
78864+ struct task_struct *p;
78865+ int ret = 0;
78866+
78867+ if (unlikely(!(gr_status & GR_READY) || !pid))
78868+ return ret;
78869+
78870+ read_lock(&tasklist_lock);
78871+ do_each_pid_task(pid, type, p) {
78872+ if ((p->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
78873+ p->acl != current->acl) {
78874+ ret = 1;
78875+ goto out;
78876+ }
78877+ } while_each_pid_task(pid, type, p);
78878+out:
78879+ read_unlock(&tasklist_lock);
78880+
78881+ return ret;
78882+}
78883+
78884+void
78885+gr_copy_label(struct task_struct *tsk)
78886+{
78887+ struct task_struct *p = current;
78888+
78889+ tsk->inherited = p->inherited;
78890+ tsk->acl_sp_role = 0;
78891+ tsk->acl_role_id = p->acl_role_id;
78892+ tsk->acl = p->acl;
78893+ tsk->role = p->role;
78894+ tsk->signal->used_accept = 0;
78895+ tsk->signal->curr_ip = p->signal->curr_ip;
78896+ tsk->signal->saved_ip = p->signal->saved_ip;
78897+ if (p->exec_file)
78898+ get_file(p->exec_file);
78899+ tsk->exec_file = p->exec_file;
78900+ tsk->is_writable = p->is_writable;
78901+ if (unlikely(p->signal->used_accept)) {
78902+ p->signal->curr_ip = 0;
78903+ p->signal->saved_ip = 0;
78904+ }
78905+
78906+ return;
78907+}
78908+
78909+extern int gr_process_kernel_setuid_ban(struct user_struct *user);
78910+
78911+int
78912+gr_check_user_change(kuid_t real, kuid_t effective, kuid_t fs)
78913+{
78914+ unsigned int i;
78915+ __u16 num;
78916+ uid_t *uidlist;
78917+ uid_t curuid;
78918+ int realok = 0;
78919+ int effectiveok = 0;
78920+ int fsok = 0;
78921+ uid_t globalreal, globaleffective, globalfs;
78922+
78923+#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT)
78924+ struct user_struct *user;
78925+
78926+ if (!uid_valid(real))
78927+ goto skipit;
78928+
78929+ /* find user based on global namespace */
78930+
78931+ globalreal = GR_GLOBAL_UID(real);
78932+
78933+ user = find_user(make_kuid(&init_user_ns, globalreal));
78934+ if (user == NULL)
78935+ goto skipit;
78936+
78937+ if (gr_process_kernel_setuid_ban(user)) {
78938+ /* for find_user */
78939+ free_uid(user);
78940+ return 1;
78941+ }
78942+
78943+ /* for find_user */
78944+ free_uid(user);
78945+
78946+skipit:
78947+#endif
78948+
78949+ if (unlikely(!(gr_status & GR_READY)))
78950+ return 0;
78951+
78952+ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
78953+ gr_log_learn_uid_change(real, effective, fs);
78954+
78955+ num = current->acl->user_trans_num;
78956+ uidlist = current->acl->user_transitions;
78957+
78958+ if (uidlist == NULL)
78959+ return 0;
78960+
78961+ if (!uid_valid(real)) {
78962+ realok = 1;
78963+ globalreal = (uid_t)-1;
78964+ } else {
78965+ globalreal = GR_GLOBAL_UID(real);
78966+ }
78967+ if (!uid_valid(effective)) {
78968+ effectiveok = 1;
78969+ globaleffective = (uid_t)-1;
78970+ } else {
78971+ globaleffective = GR_GLOBAL_UID(effective);
78972+ }
78973+ if (!uid_valid(fs)) {
78974+ fsok = 1;
78975+ globalfs = (uid_t)-1;
78976+ } else {
78977+ globalfs = GR_GLOBAL_UID(fs);
78978+ }
78979+
78980+ if (current->acl->user_trans_type & GR_ID_ALLOW) {
78981+ for (i = 0; i < num; i++) {
78982+ curuid = uidlist[i];
78983+ if (globalreal == curuid)
78984+ realok = 1;
78985+ if (globaleffective == curuid)
78986+ effectiveok = 1;
78987+ if (globalfs == curuid)
78988+ fsok = 1;
78989+ }
78990+ } else if (current->acl->user_trans_type & GR_ID_DENY) {
78991+ for (i = 0; i < num; i++) {
78992+ curuid = uidlist[i];
78993+ if (globalreal == curuid)
78994+ break;
78995+ if (globaleffective == curuid)
78996+ break;
78997+ if (globalfs == curuid)
78998+ break;
78999+ }
79000+ /* not in deny list */
79001+ if (i == num) {
79002+ realok = 1;
79003+ effectiveok = 1;
79004+ fsok = 1;
79005+ }
79006+ }
79007+
79008+ if (realok && effectiveok && fsok)
79009+ return 0;
79010+ else {
79011+ gr_log_int(GR_DONT_AUDIT, GR_USRCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : globalfs) : globaleffective) : globalreal);
79012+ return 1;
79013+ }
79014+}
79015+
79016+int
79017+gr_check_group_change(kgid_t real, kgid_t effective, kgid_t fs)
79018+{
79019+ unsigned int i;
79020+ __u16 num;
79021+ gid_t *gidlist;
79022+ gid_t curgid;
79023+ int realok = 0;
79024+ int effectiveok = 0;
79025+ int fsok = 0;
79026+ gid_t globalreal, globaleffective, globalfs;
79027+
79028+ if (unlikely(!(gr_status & GR_READY)))
79029+ return 0;
79030+
79031+ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
79032+ gr_log_learn_gid_change(real, effective, fs);
79033+
79034+ num = current->acl->group_trans_num;
79035+ gidlist = current->acl->group_transitions;
79036+
79037+ if (gidlist == NULL)
79038+ return 0;
79039+
79040+ if (!gid_valid(real)) {
79041+ realok = 1;
79042+ globalreal = (gid_t)-1;
79043+ } else {
79044+ globalreal = GR_GLOBAL_GID(real);
79045+ }
79046+ if (!gid_valid(effective)) {
79047+ effectiveok = 1;
79048+ globaleffective = (gid_t)-1;
79049+ } else {
79050+ globaleffective = GR_GLOBAL_GID(effective);
79051+ }
79052+ if (!gid_valid(fs)) {
79053+ fsok = 1;
79054+ globalfs = (gid_t)-1;
79055+ } else {
79056+ globalfs = GR_GLOBAL_GID(fs);
79057+ }
79058+
79059+ if (current->acl->group_trans_type & GR_ID_ALLOW) {
79060+ for (i = 0; i < num; i++) {
79061+ curgid = gidlist[i];
79062+ if (globalreal == curgid)
79063+ realok = 1;
79064+ if (globaleffective == curgid)
79065+ effectiveok = 1;
79066+ if (globalfs == curgid)
79067+ fsok = 1;
79068+ }
79069+ } else if (current->acl->group_trans_type & GR_ID_DENY) {
79070+ for (i = 0; i < num; i++) {
79071+ curgid = gidlist[i];
79072+ if (globalreal == curgid)
79073+ break;
79074+ if (globaleffective == curgid)
79075+ break;
79076+ if (globalfs == curgid)
79077+ break;
79078+ }
79079+ /* not in deny list */
79080+ if (i == num) {
79081+ realok = 1;
79082+ effectiveok = 1;
79083+ fsok = 1;
79084+ }
79085+ }
79086+
79087+ if (realok && effectiveok && fsok)
79088+ return 0;
79089+ else {
79090+ gr_log_int(GR_DONT_AUDIT, GR_GRPCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : globalfs) : globaleffective) : globalreal);
79091+ return 1;
79092+ }
79093+}
79094+
79095+extern int gr_acl_is_capable(const int cap);
79096+
79097+void
79098+gr_set_role_label(struct task_struct *task, const kuid_t kuid, const kgid_t kgid)
79099+{
79100+ struct acl_role_label *role = task->role;
79101+ struct acl_role_label *origrole = role;
79102+ struct acl_subject_label *subj = NULL;
79103+ struct acl_object_label *obj;
79104+ struct file *filp;
79105+ uid_t uid;
79106+ gid_t gid;
79107+
79108+ if (unlikely(!(gr_status & GR_READY)))
79109+ return;
79110+
79111+ uid = GR_GLOBAL_UID(kuid);
79112+ gid = GR_GLOBAL_GID(kgid);
79113+
79114+ filp = task->exec_file;
79115+
79116+ /* kernel process, we'll give them the kernel role */
79117+ if (unlikely(!filp)) {
79118+ task->role = running_polstate.kernel_role;
79119+ task->acl = running_polstate.kernel_role->root_label;
79120+ return;
79121+ } else if (!task->role || !(task->role->roletype & GR_ROLE_SPECIAL)) {
79122+ /* save the current ip at time of role lookup so that the proper
79123+ IP will be learned for role_allowed_ip */
79124+ task->signal->saved_ip = task->signal->curr_ip;
79125+ role = lookup_acl_role_label(task, uid, gid);
79126+ }
79127+
79128+ /* don't change the role if we're not a privileged process */
79129+ if (role && task->role != role &&
79130+ (((role->roletype & GR_ROLE_USER) && !gr_acl_is_capable(CAP_SETUID)) ||
79131+ ((role->roletype & GR_ROLE_GROUP) && !gr_acl_is_capable(CAP_SETGID))))
79132+ return;
79133+
79134+ task->role = role;
79135+
79136+ if (task->inherited) {
79137+ /* if we reached our subject through inheritance, then first see
79138+ if there's a subject of the same name in the new role that has
79139+ an object that would result in the same inherited subject
79140+ */
79141+ subj = gr_get_subject_for_task(task, task->acl->filename, 0);
79142+ if (subj) {
79143+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, subj);
79144+ if (!(obj->mode & GR_INHERIT))
79145+ subj = NULL;
79146+ }
79147+
79148+ }
79149+ if (subj == NULL) {
79150+ /* otherwise:
79151+ perform subject lookup in possibly new role
79152+ we can use this result below in the case where role == task->role
79153+ */
79154+ subj = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, role);
79155+ }
79156+
79157+ /* if we changed uid/gid, but result in the same role
79158+ and are using inheritance, don't lose the inherited subject
79159+ if current subject is other than what normal lookup
79160+ would result in, we arrived via inheritance, don't
79161+ lose subject
79162+ */
79163+ if (role != origrole || (!(task->acl->mode & GR_INHERITLEARN) &&
79164+ (subj == task->acl)))
79165+ task->acl = subj;
79166+
79167+ /* leave task->inherited unaffected */
79168+
79169+ task->is_writable = 0;
79170+
79171+ /* ignore additional mmap checks for processes that are writable
79172+ by the default ACL */
79173+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, running_polstate.default_role->root_label);
79174+ if (unlikely(obj->mode & GR_WRITE))
79175+ task->is_writable = 1;
79176+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
79177+ if (unlikely(obj->mode & GR_WRITE))
79178+ task->is_writable = 1;
79179+
79180+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
79181+ printk(KERN_ALERT "Set role label for (%s:%d): role:%s, subject:%s\n", task->comm, task_pid_nr(task), task->role->rolename, task->acl->filename);
79182+#endif
79183+
79184+ gr_set_proc_res(task);
79185+
79186+ return;
79187+}
79188+
79189+int
79190+gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
79191+ const int unsafe_flags)
79192+{
79193+ struct task_struct *task = current;
79194+ struct acl_subject_label *newacl;
79195+ struct acl_object_label *obj;
79196+ __u32 retmode;
79197+
79198+ if (unlikely(!(gr_status & GR_READY)))
79199+ return 0;
79200+
79201+ newacl = chk_subj_label(dentry, mnt, task->role);
79202+
79203+ /* special handling for if we did an strace -f -p <pid> from an admin role, where pid then
79204+ did an exec
79205+ */
79206+ rcu_read_lock();
79207+ read_lock(&tasklist_lock);
79208+ if (task->ptrace && task->parent && ((task->parent->role->roletype & GR_ROLE_GOD) ||
79209+ (task->parent->acl->mode & GR_POVERRIDE))) {
79210+ read_unlock(&tasklist_lock);
79211+ rcu_read_unlock();
79212+ goto skip_check;
79213+ }
79214+ read_unlock(&tasklist_lock);
79215+ rcu_read_unlock();
79216+
79217+ if (unsafe_flags && !(task->acl->mode & GR_POVERRIDE) && (task->acl != newacl) &&
79218+ !(task->role->roletype & GR_ROLE_GOD) &&
79219+ !gr_search_file(dentry, GR_PTRACERD, mnt) &&
79220+ !(task->acl->mode & (GR_LEARN | GR_INHERITLEARN))) {
79221+ if (unsafe_flags & LSM_UNSAFE_SHARE)
79222+ gr_log_fs_generic(GR_DONT_AUDIT, GR_UNSAFESHARE_EXEC_ACL_MSG, dentry, mnt);
79223+ else
79224+ gr_log_fs_generic(GR_DONT_AUDIT, GR_PTRACE_EXEC_ACL_MSG, dentry, mnt);
79225+ return -EACCES;
79226+ }
79227+
79228+skip_check:
79229+
79230+ obj = chk_obj_label(dentry, mnt, task->acl);
79231+ retmode = obj->mode & (GR_INHERIT | GR_AUDIT_INHERIT);
79232+
79233+ if (!(task->acl->mode & GR_INHERITLEARN) &&
79234+ ((newacl->mode & GR_LEARN) || !(retmode & GR_INHERIT))) {
79235+ if (obj->nested)
79236+ task->acl = obj->nested;
79237+ else
79238+ task->acl = newacl;
79239+ task->inherited = 0;
79240+ } else {
79241+ task->inherited = 1;
79242+ if (retmode & GR_INHERIT && retmode & GR_AUDIT_INHERIT)
79243+ gr_log_str_fs(GR_DO_AUDIT, GR_INHERIT_ACL_MSG, task->acl->filename, dentry, mnt);
79244+ }
79245+
79246+ task->is_writable = 0;
79247+
79248+ /* ignore additional mmap checks for processes that are writable
79249+ by the default ACL */
79250+ obj = chk_obj_label(dentry, mnt, running_polstate.default_role->root_label);
79251+ if (unlikely(obj->mode & GR_WRITE))
79252+ task->is_writable = 1;
79253+ obj = chk_obj_label(dentry, mnt, task->role->root_label);
79254+ if (unlikely(obj->mode & GR_WRITE))
79255+ task->is_writable = 1;
79256+
79257+ gr_set_proc_res(task);
79258+
79259+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
79260+ printk(KERN_ALERT "Set subject label for (%s:%d): role:%s, subject:%s\n", task->comm, task_pid_nr(task), task->role->rolename, task->acl->filename);
79261+#endif
79262+ return 0;
79263+}
79264+
79265+/* always called with valid inodev ptr */
79266+static void
79267+do_handle_delete(struct inodev_entry *inodev, const u64 ino, const dev_t dev)
79268+{
79269+ struct acl_object_label *matchpo;
79270+ struct acl_subject_label *matchps;
79271+ struct acl_subject_label *subj;
79272+ struct acl_role_label *role;
79273+ unsigned int x;
79274+
79275+ FOR_EACH_ROLE_START(role)
79276+ FOR_EACH_SUBJECT_START(role, subj, x)
79277+ if ((matchpo = lookup_acl_obj_label(ino, dev, subj)) != NULL)
79278+ matchpo->mode |= GR_DELETED;
79279+ FOR_EACH_SUBJECT_END(subj,x)
79280+ FOR_EACH_NESTED_SUBJECT_START(role, subj)
79281+ /* nested subjects aren't in the role's subj_hash table */
79282+ if ((matchpo = lookup_acl_obj_label(ino, dev, subj)) != NULL)
79283+ matchpo->mode |= GR_DELETED;
79284+ FOR_EACH_NESTED_SUBJECT_END(subj)
79285+ if ((matchps = lookup_acl_subj_label(ino, dev, role)) != NULL)
79286+ matchps->mode |= GR_DELETED;
79287+ FOR_EACH_ROLE_END(role)
79288+
79289+ inodev->nentry->deleted = 1;
79290+
79291+ return;
79292+}
79293+
79294+void
79295+gr_handle_delete(const u64 ino, const dev_t dev)
79296+{
79297+ struct inodev_entry *inodev;
79298+
79299+ if (unlikely(!(gr_status & GR_READY)))
79300+ return;
79301+
79302+ write_lock(&gr_inode_lock);
79303+ inodev = lookup_inodev_entry(ino, dev);
79304+ if (inodev != NULL)
79305+ do_handle_delete(inodev, ino, dev);
79306+ write_unlock(&gr_inode_lock);
79307+
79308+ return;
79309+}
79310+
79311+static void
79312+update_acl_obj_label(const u64 oldinode, const dev_t olddevice,
79313+ const u64 newinode, const dev_t newdevice,
79314+ struct acl_subject_label *subj)
79315+{
79316+ unsigned int index = gr_fhash(oldinode, olddevice, subj->obj_hash_size);
79317+ struct acl_object_label *match;
79318+
79319+ match = subj->obj_hash[index];
79320+
79321+ while (match && (match->inode != oldinode ||
79322+ match->device != olddevice ||
79323+ !(match->mode & GR_DELETED)))
79324+ match = match->next;
79325+
79326+ if (match && (match->inode == oldinode)
79327+ && (match->device == olddevice)
79328+ && (match->mode & GR_DELETED)) {
79329+ if (match->prev == NULL) {
79330+ subj->obj_hash[index] = match->next;
79331+ if (match->next != NULL)
79332+ match->next->prev = NULL;
79333+ } else {
79334+ match->prev->next = match->next;
79335+ if (match->next != NULL)
79336+ match->next->prev = match->prev;
79337+ }
79338+ match->prev = NULL;
79339+ match->next = NULL;
79340+ match->inode = newinode;
79341+ match->device = newdevice;
79342+ match->mode &= ~GR_DELETED;
79343+
79344+ insert_acl_obj_label(match, subj);
79345+ }
79346+
79347+ return;
79348+}
79349+
79350+static void
79351+update_acl_subj_label(const u64 oldinode, const dev_t olddevice,
79352+ const u64 newinode, const dev_t newdevice,
79353+ struct acl_role_label *role)
79354+{
79355+ unsigned int index = gr_fhash(oldinode, olddevice, role->subj_hash_size);
79356+ struct acl_subject_label *match;
79357+
79358+ match = role->subj_hash[index];
79359+
79360+ while (match && (match->inode != oldinode ||
79361+ match->device != olddevice ||
79362+ !(match->mode & GR_DELETED)))
79363+ match = match->next;
79364+
79365+ if (match && (match->inode == oldinode)
79366+ && (match->device == olddevice)
79367+ && (match->mode & GR_DELETED)) {
79368+ if (match->prev == NULL) {
79369+ role->subj_hash[index] = match->next;
79370+ if (match->next != NULL)
79371+ match->next->prev = NULL;
79372+ } else {
79373+ match->prev->next = match->next;
79374+ if (match->next != NULL)
79375+ match->next->prev = match->prev;
79376+ }
79377+ match->prev = NULL;
79378+ match->next = NULL;
79379+ match->inode = newinode;
79380+ match->device = newdevice;
79381+ match->mode &= ~GR_DELETED;
79382+
79383+ insert_acl_subj_label(match, role);
79384+ }
79385+
79386+ return;
79387+}
79388+
79389+static void
79390+update_inodev_entry(const u64 oldinode, const dev_t olddevice,
79391+ const u64 newinode, const dev_t newdevice)
79392+{
79393+ unsigned int index = gr_fhash(oldinode, olddevice, running_polstate.inodev_set.i_size);
79394+ struct inodev_entry *match;
79395+
79396+ match = running_polstate.inodev_set.i_hash[index];
79397+
79398+ while (match && (match->nentry->inode != oldinode ||
79399+ match->nentry->device != olddevice || !match->nentry->deleted))
79400+ match = match->next;
79401+
79402+ if (match && (match->nentry->inode == oldinode)
79403+ && (match->nentry->device == olddevice) &&
79404+ match->nentry->deleted) {
79405+ if (match->prev == NULL) {
79406+ running_polstate.inodev_set.i_hash[index] = match->next;
79407+ if (match->next != NULL)
79408+ match->next->prev = NULL;
79409+ } else {
79410+ match->prev->next = match->next;
79411+ if (match->next != NULL)
79412+ match->next->prev = match->prev;
79413+ }
79414+ match->prev = NULL;
79415+ match->next = NULL;
79416+ match->nentry->inode = newinode;
79417+ match->nentry->device = newdevice;
79418+ match->nentry->deleted = 0;
79419+
79420+ insert_inodev_entry(match);
79421+ }
79422+
79423+ return;
79424+}
79425+
79426+static void
79427+__do_handle_create(const struct name_entry *matchn, u64 ino, dev_t dev)
79428+{
79429+ struct acl_subject_label *subj;
79430+ struct acl_role_label *role;
79431+ unsigned int x;
79432+
79433+ FOR_EACH_ROLE_START(role)
79434+ update_acl_subj_label(matchn->inode, matchn->device, ino, dev, role);
79435+
79436+ FOR_EACH_NESTED_SUBJECT_START(role, subj)
79437+ if ((subj->inode == ino) && (subj->device == dev)) {
79438+ subj->inode = ino;
79439+ subj->device = dev;
79440+ }
79441+ /* nested subjects aren't in the role's subj_hash table */
79442+ update_acl_obj_label(matchn->inode, matchn->device,
79443+ ino, dev, subj);
79444+ FOR_EACH_NESTED_SUBJECT_END(subj)
79445+ FOR_EACH_SUBJECT_START(role, subj, x)
79446+ update_acl_obj_label(matchn->inode, matchn->device,
79447+ ino, dev, subj);
79448+ FOR_EACH_SUBJECT_END(subj,x)
79449+ FOR_EACH_ROLE_END(role)
79450+
79451+ update_inodev_entry(matchn->inode, matchn->device, ino, dev);
79452+
79453+ return;
79454+}
79455+
79456+static void
79457+do_handle_create(const struct name_entry *matchn, const struct dentry *dentry,
79458+ const struct vfsmount *mnt)
79459+{
79460+ u64 ino = __get_ino(dentry);
79461+ dev_t dev = __get_dev(dentry);
79462+
79463+ __do_handle_create(matchn, ino, dev);
79464+
79465+ return;
79466+}
79467+
79468+void
79469+gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
79470+{
79471+ struct name_entry *matchn;
79472+
79473+ if (unlikely(!(gr_status & GR_READY)))
79474+ return;
79475+
79476+ preempt_disable();
79477+ matchn = lookup_name_entry(gr_to_filename_rbac(dentry, mnt));
79478+
79479+ if (unlikely((unsigned long)matchn)) {
79480+ write_lock(&gr_inode_lock);
79481+ do_handle_create(matchn, dentry, mnt);
79482+ write_unlock(&gr_inode_lock);
79483+ }
79484+ preempt_enable();
79485+
79486+ return;
79487+}
79488+
79489+void
79490+gr_handle_proc_create(const struct dentry *dentry, const struct inode *inode)
79491+{
79492+ struct name_entry *matchn;
79493+
79494+ if (unlikely(!(gr_status & GR_READY)))
79495+ return;
79496+
79497+ preempt_disable();
79498+ matchn = lookup_name_entry(gr_to_proc_filename_rbac(dentry, init_pid_ns.proc_mnt));
79499+
79500+ if (unlikely((unsigned long)matchn)) {
79501+ write_lock(&gr_inode_lock);
79502+ __do_handle_create(matchn, inode->i_ino, inode->i_sb->s_dev);
79503+ write_unlock(&gr_inode_lock);
79504+ }
79505+ preempt_enable();
79506+
79507+ return;
79508+}
79509+
79510+void
79511+gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
79512+ struct dentry *old_dentry,
79513+ struct dentry *new_dentry,
79514+ struct vfsmount *mnt, const __u8 replace, unsigned int flags)
79515+{
79516+ struct name_entry *matchn;
79517+ struct name_entry *matchn2 = NULL;
79518+ struct inodev_entry *inodev;
79519+ struct inode *inode = new_dentry->d_inode;
79520+ u64 old_ino = __get_ino(old_dentry);
79521+ dev_t old_dev = __get_dev(old_dentry);
79522+ unsigned int exchange = flags & RENAME_EXCHANGE;
79523+
79524+ /* vfs_rename swaps the name and parent link for old_dentry and
79525+ new_dentry
79526+ at this point, old_dentry has the new name, parent link, and inode
79527+ for the renamed file
79528+ if a file is being replaced by a rename, new_dentry has the inode
79529+ and name for the replaced file
79530+ */
79531+
79532+ if (unlikely(!(gr_status & GR_READY)))
79533+ return;
79534+
79535+ preempt_disable();
79536+ matchn = lookup_name_entry(gr_to_filename_rbac(old_dentry, mnt));
79537+
79538+ /* exchange cases:
79539+ a filename exists for the source, but not dest
79540+ do a recreate on source
79541+ a filename exists for the dest, but not source
79542+ do a recreate on dest
79543+ a filename exists for both source and dest
79544+ delete source and dest, then create source and dest
79545+ a filename exists for neither source nor dest
79546+ no updates needed
79547+
79548+ the name entry lookups get us the old inode/dev associated with
79549+ each name, so do the deletes first (if possible) so that when
79550+ we do the create, we pick up on the right entries
79551+ */
79552+
79553+ if (exchange)
79554+ matchn2 = lookup_name_entry(gr_to_filename_rbac(new_dentry, mnt));
79555+
79556+ /* we wouldn't have to check d_inode if it weren't for
79557+ NFS silly-renaming
79558+ */
79559+
79560+ write_lock(&gr_inode_lock);
79561+ if (unlikely((replace || exchange) && inode)) {
79562+ u64 new_ino = __get_ino(new_dentry);
79563+ dev_t new_dev = __get_dev(new_dentry);
79564+
79565+ inodev = lookup_inodev_entry(new_ino, new_dev);
79566+ if (inodev != NULL && ((inode->i_nlink <= 1) || S_ISDIR(inode->i_mode)))
79567+ do_handle_delete(inodev, new_ino, new_dev);
79568+ }
79569+
79570+ inodev = lookup_inodev_entry(old_ino, old_dev);
79571+ if (inodev != NULL && ((old_dentry->d_inode->i_nlink <= 1) || S_ISDIR(old_dentry->d_inode->i_mode)))
79572+ do_handle_delete(inodev, old_ino, old_dev);
79573+
79574+ if (unlikely(matchn != NULL))
79575+ do_handle_create(matchn, old_dentry, mnt);
79576+
79577+ if (unlikely(matchn2 != NULL))
79578+ do_handle_create(matchn2, new_dentry, mnt);
79579+
79580+ write_unlock(&gr_inode_lock);
79581+ preempt_enable();
79582+
79583+ return;
79584+}
79585+
79586+#if defined(CONFIG_GRKERNSEC_RESLOG) || !defined(CONFIG_GRKERNSEC_NO_RBAC)
79587+static const unsigned long res_learn_bumps[GR_NLIMITS] = {
79588+ [RLIMIT_CPU] = GR_RLIM_CPU_BUMP,
79589+ [RLIMIT_FSIZE] = GR_RLIM_FSIZE_BUMP,
79590+ [RLIMIT_DATA] = GR_RLIM_DATA_BUMP,
79591+ [RLIMIT_STACK] = GR_RLIM_STACK_BUMP,
79592+ [RLIMIT_CORE] = GR_RLIM_CORE_BUMP,
79593+ [RLIMIT_RSS] = GR_RLIM_RSS_BUMP,
79594+ [RLIMIT_NPROC] = GR_RLIM_NPROC_BUMP,
79595+ [RLIMIT_NOFILE] = GR_RLIM_NOFILE_BUMP,
79596+ [RLIMIT_MEMLOCK] = GR_RLIM_MEMLOCK_BUMP,
79597+ [RLIMIT_AS] = GR_RLIM_AS_BUMP,
79598+ [RLIMIT_LOCKS] = GR_RLIM_LOCKS_BUMP,
79599+ [RLIMIT_SIGPENDING] = GR_RLIM_SIGPENDING_BUMP,
79600+ [RLIMIT_MSGQUEUE] = GR_RLIM_MSGQUEUE_BUMP,
79601+ [RLIMIT_NICE] = GR_RLIM_NICE_BUMP,
79602+ [RLIMIT_RTPRIO] = GR_RLIM_RTPRIO_BUMP,
79603+ [RLIMIT_RTTIME] = GR_RLIM_RTTIME_BUMP
79604+};
79605+
79606+void
79607+gr_learn_resource(const struct task_struct *task,
79608+ const int res, const unsigned long wanted, const int gt)
79609+{
79610+ struct acl_subject_label *acl;
79611+ const struct cred *cred;
79612+
79613+ if (unlikely((gr_status & GR_READY) &&
79614+ task->acl && (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))))
79615+ goto skip_reslog;
79616+
79617+ gr_log_resource(task, res, wanted, gt);
79618+skip_reslog:
79619+
79620+ if (unlikely(!(gr_status & GR_READY) || !wanted || res >= GR_NLIMITS))
79621+ return;
79622+
79623+ acl = task->acl;
79624+
79625+ if (likely(!acl || !(acl->mode & (GR_LEARN | GR_INHERITLEARN)) ||
79626+ !(acl->resmask & (1U << (unsigned short) res))))
79627+ return;
79628+
79629+ if (wanted >= acl->res[res].rlim_cur) {
79630+ unsigned long res_add;
79631+
79632+ res_add = wanted + res_learn_bumps[res];
79633+
79634+ acl->res[res].rlim_cur = res_add;
79635+
79636+ if (wanted > acl->res[res].rlim_max)
79637+ acl->res[res].rlim_max = res_add;
79638+
79639+ /* only log the subject filename, since resource logging is supported for
79640+ single-subject learning only */
79641+ rcu_read_lock();
79642+ cred = __task_cred(task);
79643+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
79644+ task->role->roletype, GR_GLOBAL_UID(cred->uid), GR_GLOBAL_GID(cred->gid), acl->filename,
79645+ acl->filename, acl->res[res].rlim_cur, acl->res[res].rlim_max,
79646+ "", (unsigned long) res, &task->signal->saved_ip);
79647+ rcu_read_unlock();
79648+ }
79649+
79650+ return;
79651+}
79652+EXPORT_SYMBOL_GPL(gr_learn_resource);
79653+#endif
79654+
79655+#if defined(CONFIG_PAX_HAVE_ACL_FLAGS) && (defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR))
79656+void
79657+pax_set_initial_flags(struct linux_binprm *bprm)
79658+{
79659+ struct task_struct *task = current;
79660+ struct acl_subject_label *proc;
79661+ unsigned long flags;
79662+
79663+ if (unlikely(!(gr_status & GR_READY)))
79664+ return;
79665+
79666+ flags = pax_get_flags(task);
79667+
79668+ proc = task->acl;
79669+
79670+ if (proc->pax_flags & GR_PAX_DISABLE_PAGEEXEC)
79671+ flags &= ~MF_PAX_PAGEEXEC;
79672+ if (proc->pax_flags & GR_PAX_DISABLE_SEGMEXEC)
79673+ flags &= ~MF_PAX_SEGMEXEC;
79674+ if (proc->pax_flags & GR_PAX_DISABLE_RANDMMAP)
79675+ flags &= ~MF_PAX_RANDMMAP;
79676+ if (proc->pax_flags & GR_PAX_DISABLE_EMUTRAMP)
79677+ flags &= ~MF_PAX_EMUTRAMP;
79678+ if (proc->pax_flags & GR_PAX_DISABLE_MPROTECT)
79679+ flags &= ~MF_PAX_MPROTECT;
79680+
79681+ if (proc->pax_flags & GR_PAX_ENABLE_PAGEEXEC)
79682+ flags |= MF_PAX_PAGEEXEC;
79683+ if (proc->pax_flags & GR_PAX_ENABLE_SEGMEXEC)
79684+ flags |= MF_PAX_SEGMEXEC;
79685+ if (proc->pax_flags & GR_PAX_ENABLE_RANDMMAP)
79686+ flags |= MF_PAX_RANDMMAP;
79687+ if (proc->pax_flags & GR_PAX_ENABLE_EMUTRAMP)
79688+ flags |= MF_PAX_EMUTRAMP;
79689+ if (proc->pax_flags & GR_PAX_ENABLE_MPROTECT)
79690+ flags |= MF_PAX_MPROTECT;
79691+
79692+ pax_set_flags(task, flags);
79693+
79694+ return;
79695+}
79696+#endif
79697+
79698+int
79699+gr_handle_proc_ptrace(struct task_struct *task)
79700+{
79701+ struct file *filp;
79702+ struct task_struct *tmp = task;
79703+ struct task_struct *curtemp = current;
79704+ __u32 retmode;
79705+
79706+#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
79707+ if (unlikely(!(gr_status & GR_READY)))
79708+ return 0;
79709+#endif
79710+
79711+ read_lock(&tasklist_lock);
79712+ read_lock(&grsec_exec_file_lock);
79713+ filp = task->exec_file;
79714+
79715+ while (task_pid_nr(tmp) > 0) {
79716+ if (tmp == curtemp)
79717+ break;
79718+ tmp = tmp->real_parent;
79719+ }
79720+
79721+ if (!filp || (task_pid_nr(tmp) == 0 && ((grsec_enable_harden_ptrace && gr_is_global_nonroot(current_uid()) && !(gr_status & GR_READY)) ||
79722+ ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE))))) {
79723+ read_unlock(&grsec_exec_file_lock);
79724+ read_unlock(&tasklist_lock);
79725+ return 1;
79726+ }
79727+
79728+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
79729+ if (!(gr_status & GR_READY)) {
79730+ read_unlock(&grsec_exec_file_lock);
79731+ read_unlock(&tasklist_lock);
79732+ return 0;
79733+ }
79734+#endif
79735+
79736+ retmode = gr_search_file(filp->f_path.dentry, GR_NOPTRACE, filp->f_path.mnt);
79737+ read_unlock(&grsec_exec_file_lock);
79738+ read_unlock(&tasklist_lock);
79739+
79740+ if (retmode & GR_NOPTRACE)
79741+ return 1;
79742+
79743+ if (!(current->acl->mode & GR_POVERRIDE) && !(current->role->roletype & GR_ROLE_GOD)
79744+ && (current->acl != task->acl || (current->acl != current->role->root_label
79745+ && task_pid_nr(current) != task_pid_nr(task))))
79746+ return 1;
79747+
79748+ return 0;
79749+}
79750+
79751+void task_grsec_rbac(struct seq_file *m, struct task_struct *p)
79752+{
79753+ if (unlikely(!(gr_status & GR_READY)))
79754+ return;
79755+
79756+ if (!(current->role->roletype & GR_ROLE_GOD))
79757+ return;
79758+
79759+ seq_printf(m, "RBAC:\t%.64s:%c:%.950s\n",
79760+ p->role->rolename, gr_task_roletype_to_char(p),
79761+ p->acl->filename);
79762+}
79763+
79764+int
79765+gr_handle_ptrace(struct task_struct *task, const long request)
79766+{
79767+ struct task_struct *tmp = task;
79768+ struct task_struct *curtemp = current;
79769+ __u32 retmode;
79770+
79771+#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
79772+ if (unlikely(!(gr_status & GR_READY)))
79773+ return 0;
79774+#endif
79775+ if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
79776+ read_lock(&tasklist_lock);
79777+ while (task_pid_nr(tmp) > 0) {
79778+ if (tmp == curtemp)
79779+ break;
79780+ tmp = tmp->real_parent;
79781+ }
79782+
79783+ if (task_pid_nr(tmp) == 0 && ((grsec_enable_harden_ptrace && gr_is_global_nonroot(current_uid()) && !(gr_status & GR_READY)) ||
79784+ ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE)))) {
79785+ read_unlock(&tasklist_lock);
79786+ gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
79787+ return 1;
79788+ }
79789+ read_unlock(&tasklist_lock);
79790+ }
79791+
79792+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
79793+ if (!(gr_status & GR_READY))
79794+ return 0;
79795+#endif
79796+
79797+ read_lock(&grsec_exec_file_lock);
79798+ if (unlikely(!task->exec_file)) {
79799+ read_unlock(&grsec_exec_file_lock);
79800+ return 0;
79801+ }
79802+
79803+ retmode = gr_search_file(task->exec_file->f_path.dentry, GR_PTRACERD | GR_NOPTRACE, task->exec_file->f_path.mnt);
79804+ read_unlock(&grsec_exec_file_lock);
79805+
79806+ if (retmode & GR_NOPTRACE) {
79807+ gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
79808+ return 1;
79809+ }
79810+
79811+ if (retmode & GR_PTRACERD) {
79812+ switch (request) {
79813+ case PTRACE_SEIZE:
79814+ case PTRACE_POKETEXT:
79815+ case PTRACE_POKEDATA:
79816+ case PTRACE_POKEUSR:
79817+#if !defined(CONFIG_PPC32) && !defined(CONFIG_PPC64) && !defined(CONFIG_PARISC) && !defined(CONFIG_ALPHA) && !defined(CONFIG_IA64)
79818+ case PTRACE_SETREGS:
79819+ case PTRACE_SETFPREGS:
79820+#endif
79821+#ifdef CONFIG_X86
79822+ case PTRACE_SETFPXREGS:
79823+#endif
79824+#ifdef CONFIG_ALTIVEC
79825+ case PTRACE_SETVRREGS:
79826+#endif
79827+ return 1;
79828+ default:
79829+ return 0;
79830+ }
79831+ } else if (!(current->acl->mode & GR_POVERRIDE) &&
79832+ !(current->role->roletype & GR_ROLE_GOD) &&
79833+ (current->acl != task->acl)) {
79834+ gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
79835+ return 1;
79836+ }
79837+
79838+ return 0;
79839+}
79840+
79841+static int is_writable_mmap(const struct file *filp)
79842+{
79843+ struct task_struct *task = current;
79844+ struct acl_object_label *obj, *obj2;
79845+
79846+ if (gr_status & GR_READY && !(task->acl->mode & GR_OVERRIDE) &&
79847+ !task->is_writable && S_ISREG(filp->f_path.dentry->d_inode->i_mode) && (filp->f_path.mnt != shm_mnt || (filp->f_path.dentry->d_inode->i_nlink > 0))) {
79848+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, running_polstate.default_role->root_label);
79849+ obj2 = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt,
79850+ task->role->root_label);
79851+ if (unlikely((obj->mode & GR_WRITE) || (obj2->mode & GR_WRITE))) {
79852+ gr_log_fs_generic(GR_DONT_AUDIT, GR_WRITLIB_ACL_MSG, filp->f_path.dentry, filp->f_path.mnt);
79853+ return 1;
79854+ }
79855+ }
79856+ return 0;
79857+}
79858+
79859+int
79860+gr_acl_handle_mmap(const struct file *file, const unsigned long prot)
79861+{
79862+ __u32 mode;
79863+
79864+ if (unlikely(!file || !(prot & PROT_EXEC)))
79865+ return 1;
79866+
79867+ if (is_writable_mmap(file))
79868+ return 0;
79869+
79870+ mode =
79871+ gr_search_file(file->f_path.dentry,
79872+ GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
79873+ file->f_path.mnt);
79874+
79875+ if (!gr_tpe_allow(file))
79876+ return 0;
79877+
79878+ if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
79879+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
79880+ return 0;
79881+ } else if (unlikely(!(mode & GR_EXEC))) {
79882+ return 0;
79883+ } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
79884+ gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
79885+ return 1;
79886+ }
79887+
79888+ return 1;
79889+}
79890+
79891+int
79892+gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
79893+{
79894+ __u32 mode;
79895+
79896+ if (unlikely(!file || !(prot & PROT_EXEC)))
79897+ return 1;
79898+
79899+ if (is_writable_mmap(file))
79900+ return 0;
79901+
79902+ mode =
79903+ gr_search_file(file->f_path.dentry,
79904+ GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
79905+ file->f_path.mnt);
79906+
79907+ if (!gr_tpe_allow(file))
79908+ return 0;
79909+
79910+ if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
79911+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
79912+ return 0;
79913+ } else if (unlikely(!(mode & GR_EXEC))) {
79914+ return 0;
79915+ } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
79916+ gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
79917+ return 1;
79918+ }
79919+
79920+ return 1;
79921+}
79922+
79923+void
79924+gr_acl_handle_psacct(struct task_struct *task, const long code)
79925+{
79926+ unsigned long runtime, cputime;
79927+ cputime_t utime, stime;
79928+ unsigned int wday, cday;
79929+ __u8 whr, chr;
79930+ __u8 wmin, cmin;
79931+ __u8 wsec, csec;
79932+ struct timespec curtime, starttime;
79933+
79934+ if (unlikely(!(gr_status & GR_READY) || !task->acl ||
79935+ !(task->acl->mode & GR_PROCACCT)))
79936+ return;
79937+
79938+ curtime = ns_to_timespec(ktime_get_ns());
79939+ starttime = ns_to_timespec(task->start_time);
79940+ runtime = curtime.tv_sec - starttime.tv_sec;
79941+ wday = runtime / (60 * 60 * 24);
79942+ runtime -= wday * (60 * 60 * 24);
79943+ whr = runtime / (60 * 60);
79944+ runtime -= whr * (60 * 60);
79945+ wmin = runtime / 60;
79946+ runtime -= wmin * 60;
79947+ wsec = runtime;
79948+
79949+ task_cputime(task, &utime, &stime);
79950+ cputime = cputime_to_secs(utime + stime);
79951+ cday = cputime / (60 * 60 * 24);
79952+ cputime -= cday * (60 * 60 * 24);
79953+ chr = cputime / (60 * 60);
79954+ cputime -= chr * (60 * 60);
79955+ cmin = cputime / 60;
79956+ cputime -= cmin * 60;
79957+ csec = cputime;
79958+
79959+ gr_log_procacct(GR_DO_AUDIT, GR_ACL_PROCACCT_MSG, task, wday, whr, wmin, wsec, cday, chr, cmin, csec, code);
79960+
79961+ return;
79962+}
79963+
79964+#ifdef CONFIG_TASKSTATS
79965+int gr_is_taskstats_denied(int pid)
79966+{
79967+ struct task_struct *task;
79968+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
79969+ const struct cred *cred;
79970+#endif
79971+ int ret = 0;
79972+
79973+ /* restrict taskstats viewing to un-chrooted root users
79974+ who have the 'view' subject flag if the RBAC system is enabled
79975+ */
79976+
79977+ rcu_read_lock();
79978+ read_lock(&tasklist_lock);
79979+ task = find_task_by_vpid(pid);
79980+ if (task) {
79981+#ifdef CONFIG_GRKERNSEC_CHROOT
79982+ if (proc_is_chrooted(task))
79983+ ret = -EACCES;
79984+#endif
79985+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
79986+ cred = __task_cred(task);
79987+#ifdef CONFIG_GRKERNSEC_PROC_USER
79988+ if (gr_is_global_nonroot(cred->uid))
79989+ ret = -EACCES;
79990+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
79991+ if (gr_is_global_nonroot(cred->uid) && !groups_search(cred->group_info, grsec_proc_gid))
79992+ ret = -EACCES;
79993+#endif
79994+#endif
79995+ if (gr_status & GR_READY) {
79996+ if (!(task->acl->mode & GR_VIEW))
79997+ ret = -EACCES;
79998+ }
79999+ } else
80000+ ret = -ENOENT;
80001+
80002+ read_unlock(&tasklist_lock);
80003+ rcu_read_unlock();
80004+
80005+ return ret;
80006+}
80007+#endif
80008+
80009+/* AUXV entries are filled via a descendant of search_binary_handler
80010+ after we've already applied the subject for the target
80011+*/
80012+int gr_acl_enable_at_secure(void)
80013+{
80014+ if (unlikely(!(gr_status & GR_READY)))
80015+ return 0;
80016+
80017+ if (current->acl->mode & GR_ATSECURE)
80018+ return 1;
80019+
80020+ return 0;
80021+}
80022+
80023+int gr_acl_handle_filldir(const struct file *file, const char *name, const unsigned int namelen, const u64 ino)
80024+{
80025+ struct task_struct *task = current;
80026+ struct dentry *dentry = file->f_path.dentry;
80027+ struct vfsmount *mnt = file->f_path.mnt;
80028+ struct acl_object_label *obj, *tmp;
80029+ struct acl_subject_label *subj;
80030+ unsigned int bufsize;
80031+ int is_not_root;
80032+ char *path;
80033+ dev_t dev = __get_dev(dentry);
80034+
80035+ if (unlikely(!(gr_status & GR_READY)))
80036+ return 1;
80037+
80038+ if (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))
80039+ return 1;
80040+
80041+ /* ignore Eric Biederman */
80042+ if (IS_PRIVATE(dentry->d_inode))
80043+ return 1;
80044+
80045+ subj = task->acl;
80046+ read_lock(&gr_inode_lock);
80047+ do {
80048+ obj = lookup_acl_obj_label(ino, dev, subj);
80049+ if (obj != NULL) {
80050+ read_unlock(&gr_inode_lock);
80051+ return (obj->mode & GR_FIND) ? 1 : 0;
80052+ }
80053+ } while ((subj = subj->parent_subject));
80054+ read_unlock(&gr_inode_lock);
80055+
80056+ /* this is purely an optimization since we're looking for an object
80057+ for the directory we're doing a readdir on
80058+ if it's possible for any globbed object to match the entry we're
80059+ filling into the directory, then the object we find here will be
80060+ an anchor point with attached globbed objects
80061+ */
80062+ obj = chk_obj_label_noglob(dentry, mnt, task->acl);
80063+ if (obj->globbed == NULL)
80064+ return (obj->mode & GR_FIND) ? 1 : 0;
80065+
80066+ is_not_root = ((obj->filename[0] == '/') &&
80067+ (obj->filename[1] == '\0')) ? 0 : 1;
80068+ bufsize = PAGE_SIZE - namelen - is_not_root;
80069+
80070+ /* check bufsize > PAGE_SIZE || bufsize == 0 */
80071+ if (unlikely((bufsize - 1) > (PAGE_SIZE - 1)))
80072+ return 1;
80073+
80074+ preempt_disable();
80075+ path = d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
80076+ bufsize);
80077+
80078+ bufsize = strlen(path);
80079+
80080+ /* if base is "/", don't append an additional slash */
80081+ if (is_not_root)
80082+ *(path + bufsize) = '/';
80083+ memcpy(path + bufsize + is_not_root, name, namelen);
80084+ *(path + bufsize + namelen + is_not_root) = '\0';
80085+
80086+ tmp = obj->globbed;
80087+ while (tmp) {
80088+ if (!glob_match(tmp->filename, path)) {
80089+ preempt_enable();
80090+ return (tmp->mode & GR_FIND) ? 1 : 0;
80091+ }
80092+ tmp = tmp->next;
80093+ }
80094+ preempt_enable();
80095+ return (obj->mode & GR_FIND) ? 1 : 0;
80096+}
80097+
80098+void gr_put_exec_file(struct task_struct *task)
80099+{
80100+ struct file *filp;
80101+
80102+ write_lock(&grsec_exec_file_lock);
80103+ filp = task->exec_file;
80104+ task->exec_file = NULL;
80105+ write_unlock(&grsec_exec_file_lock);
80106+
80107+ if (filp)
80108+ fput(filp);
80109+
80110+ return;
80111+}
80112+
80113+
80114+#ifdef CONFIG_NETFILTER_XT_MATCH_GRADM_MODULE
80115+EXPORT_SYMBOL_GPL(gr_acl_is_enabled);
80116+#endif
80117+#ifdef CONFIG_SECURITY
80118+EXPORT_SYMBOL_GPL(gr_check_user_change);
80119+EXPORT_SYMBOL_GPL(gr_check_group_change);
80120+#endif
80121+
80122diff --git a/grsecurity/gracl_alloc.c b/grsecurity/gracl_alloc.c
80123new file mode 100644
80124index 0000000..9adc75c
80125--- /dev/null
80126+++ b/grsecurity/gracl_alloc.c
80127@@ -0,0 +1,105 @@
80128+#include <linux/kernel.h>
80129+#include <linux/mm.h>
80130+#include <linux/slab.h>
80131+#include <linux/vmalloc.h>
80132+#include <linux/gracl.h>
80133+#include <linux/grsecurity.h>
80134+
80135+static struct gr_alloc_state __current_alloc_state = { 1, 1, NULL };
80136+struct gr_alloc_state *current_alloc_state = &__current_alloc_state;
80137+
80138+static int
80139+alloc_pop(void)
80140+{
80141+ if (current_alloc_state->alloc_stack_next == 1)
80142+ return 0;
80143+
80144+ kfree(current_alloc_state->alloc_stack[current_alloc_state->alloc_stack_next - 2]);
80145+
80146+ current_alloc_state->alloc_stack_next--;
80147+
80148+ return 1;
80149+}
80150+
80151+static int
80152+alloc_push(void *buf)
80153+{
80154+ if (current_alloc_state->alloc_stack_next >= current_alloc_state->alloc_stack_size)
80155+ return 1;
80156+
80157+ current_alloc_state->alloc_stack[current_alloc_state->alloc_stack_next - 1] = buf;
80158+
80159+ current_alloc_state->alloc_stack_next++;
80160+
80161+ return 0;
80162+}
80163+
80164+void *
80165+acl_alloc(unsigned long len)
80166+{
80167+ void *ret = NULL;
80168+
80169+ if (!len || len > PAGE_SIZE)
80170+ goto out;
80171+
80172+ ret = kmalloc(len, GFP_KERNEL);
80173+
80174+ if (ret) {
80175+ if (alloc_push(ret)) {
80176+ kfree(ret);
80177+ ret = NULL;
80178+ }
80179+ }
80180+
80181+out:
80182+ return ret;
80183+}
80184+
80185+void *
80186+acl_alloc_num(unsigned long num, unsigned long len)
80187+{
80188+ if (!len || (num > (PAGE_SIZE / len)))
80189+ return NULL;
80190+
80191+ return acl_alloc(num * len);
80192+}
80193+
80194+void
80195+acl_free_all(void)
80196+{
80197+ if (!current_alloc_state->alloc_stack)
80198+ return;
80199+
80200+ while (alloc_pop()) ;
80201+
80202+ if (current_alloc_state->alloc_stack) {
80203+ if ((current_alloc_state->alloc_stack_size * sizeof (void *)) <= PAGE_SIZE)
80204+ kfree(current_alloc_state->alloc_stack);
80205+ else
80206+ vfree(current_alloc_state->alloc_stack);
80207+ }
80208+
80209+ current_alloc_state->alloc_stack = NULL;
80210+ current_alloc_state->alloc_stack_size = 1;
80211+ current_alloc_state->alloc_stack_next = 1;
80212+
80213+ return;
80214+}
80215+
80216+int
80217+acl_alloc_stack_init(unsigned long size)
80218+{
80219+ if ((size * sizeof (void *)) <= PAGE_SIZE)
80220+ current_alloc_state->alloc_stack =
80221+ (void **) kmalloc(size * sizeof (void *), GFP_KERNEL);
80222+ else
80223+ current_alloc_state->alloc_stack = (void **) vmalloc(size * sizeof (void *));
80224+
80225+ current_alloc_state->alloc_stack_size = size;
80226+ current_alloc_state->alloc_stack_next = 1;
80227+
80228+ if (!current_alloc_state->alloc_stack)
80229+ return 0;
80230+ else
80231+ return 1;
80232+}
80233diff --git a/grsecurity/gracl_cap.c b/grsecurity/gracl_cap.c
80234new file mode 100644
80235index 0000000..1a94c11
80236--- /dev/null
80237+++ b/grsecurity/gracl_cap.c
80238@@ -0,0 +1,127 @@
80239+#include <linux/kernel.h>
80240+#include <linux/module.h>
80241+#include <linux/sched.h>
80242+#include <linux/gracl.h>
80243+#include <linux/grsecurity.h>
80244+#include <linux/grinternal.h>
80245+
80246+extern const char *captab_log[];
80247+extern int captab_log_entries;
80248+
80249+int gr_learn_cap(const struct task_struct *task, const struct cred *cred, const int cap)
80250+{
80251+ struct acl_subject_label *curracl;
80252+
80253+ if (!gr_acl_is_enabled())
80254+ return 1;
80255+
80256+ curracl = task->acl;
80257+
80258+ if (curracl->mode & (GR_LEARN | GR_INHERITLEARN)) {
80259+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
80260+ task->role->roletype, GR_GLOBAL_UID(cred->uid),
80261+ GR_GLOBAL_GID(cred->gid), task->exec_file ?
80262+ gr_to_filename(task->exec_file->f_path.dentry,
80263+ task->exec_file->f_path.mnt) : curracl->filename,
80264+ curracl->filename, 0UL,
80265+ 0UL, "", (unsigned long) cap, &task->signal->saved_ip);
80266+ return 1;
80267+ }
80268+
80269+ return 0;
80270+}
80271+
80272+int gr_task_acl_is_capable(const struct task_struct *task, const struct cred *cred, const int cap)
80273+{
80274+ struct acl_subject_label *curracl;
80275+ kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
80276+ kernel_cap_t cap_audit = __cap_empty_set;
80277+
80278+ if (!gr_acl_is_enabled())
80279+ return 1;
80280+
80281+ curracl = task->acl;
80282+
80283+ cap_drop = curracl->cap_lower;
80284+ cap_mask = curracl->cap_mask;
80285+ cap_audit = curracl->cap_invert_audit;
80286+
80287+ while ((curracl = curracl->parent_subject)) {
80288+ /* if the cap isn't specified in the current computed mask but is specified in the
80289+ current level subject, and is lowered in the current level subject, then add
80290+ it to the set of dropped capabilities
80291+ otherwise, add the current level subject's mask to the current computed mask
80292+ */
80293+ if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
80294+ cap_raise(cap_mask, cap);
80295+ if (cap_raised(curracl->cap_lower, cap))
80296+ cap_raise(cap_drop, cap);
80297+ if (cap_raised(curracl->cap_invert_audit, cap))
80298+ cap_raise(cap_audit, cap);
80299+ }
80300+ }
80301+
80302+ if (!cap_raised(cap_drop, cap)) {
80303+ if (cap_raised(cap_audit, cap))
80304+ gr_log_cap(GR_DO_AUDIT, GR_CAP_ACL_MSG2, task, captab_log[cap]);
80305+ return 1;
80306+ }
80307+
80308+ /* only learn the capability use if the process has the capability in the
80309+ general case, the two uses in sys.c of gr_learn_cap are an exception
80310+ to this rule to ensure any role transition involves what the full-learned
80311+ policy believes in a privileged process
80312+ */
80313+ if (cap_raised(cred->cap_effective, cap) && gr_learn_cap(task, cred, cap))
80314+ return 1;
80315+
80316+ if ((cap >= 0) && (cap < captab_log_entries) && cap_raised(cred->cap_effective, cap) && !cap_raised(cap_audit, cap))
80317+ gr_log_cap(GR_DONT_AUDIT, GR_CAP_ACL_MSG, task, captab_log[cap]);
80318+
80319+ return 0;
80320+}
80321+
80322+int
80323+gr_acl_is_capable(const int cap)
80324+{
80325+ return gr_task_acl_is_capable(current, current_cred(), cap);
80326+}
80327+
80328+int gr_task_acl_is_capable_nolog(const struct task_struct *task, const int cap)
80329+{
80330+ struct acl_subject_label *curracl;
80331+ kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
80332+
80333+ if (!gr_acl_is_enabled())
80334+ return 1;
80335+
80336+ curracl = task->acl;
80337+
80338+ cap_drop = curracl->cap_lower;
80339+ cap_mask = curracl->cap_mask;
80340+
80341+ while ((curracl = curracl->parent_subject)) {
80342+ /* if the cap isn't specified in the current computed mask but is specified in the
80343+ current level subject, and is lowered in the current level subject, then add
80344+ it to the set of dropped capabilities
80345+ otherwise, add the current level subject's mask to the current computed mask
80346+ */
80347+ if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
80348+ cap_raise(cap_mask, cap);
80349+ if (cap_raised(curracl->cap_lower, cap))
80350+ cap_raise(cap_drop, cap);
80351+ }
80352+ }
80353+
80354+ if (!cap_raised(cap_drop, cap))
80355+ return 1;
80356+
80357+ return 0;
80358+}
80359+
80360+int
80361+gr_acl_is_capable_nolog(const int cap)
80362+{
80363+ return gr_task_acl_is_capable_nolog(current, cap);
80364+}
80365+
80366diff --git a/grsecurity/gracl_compat.c b/grsecurity/gracl_compat.c
80367new file mode 100644
80368index 0000000..a43dd06
80369--- /dev/null
80370+++ b/grsecurity/gracl_compat.c
80371@@ -0,0 +1,269 @@
80372+#include <linux/kernel.h>
80373+#include <linux/gracl.h>
80374+#include <linux/compat.h>
80375+#include <linux/gracl_compat.h>
80376+
80377+#include <asm/uaccess.h>
80378+
80379+int copy_gr_arg_wrapper_compat(const char *buf, struct gr_arg_wrapper *uwrap)
80380+{
80381+ struct gr_arg_wrapper_compat uwrapcompat;
80382+
80383+ if (copy_from_user(&uwrapcompat, buf, sizeof(uwrapcompat)))
80384+ return -EFAULT;
80385+
80386+ if ((uwrapcompat.version != GRSECURITY_VERSION) ||
80387+ (uwrapcompat.size != sizeof(struct gr_arg_compat)))
80388+ return -EINVAL;
80389+
80390+ uwrap->arg = compat_ptr(uwrapcompat.arg);
80391+ uwrap->version = uwrapcompat.version;
80392+ uwrap->size = sizeof(struct gr_arg);
80393+
80394+ return 0;
80395+}
80396+
80397+int copy_gr_arg_compat(const struct gr_arg __user *buf, struct gr_arg *arg)
80398+{
80399+ struct gr_arg_compat argcompat;
80400+
80401+ if (copy_from_user(&argcompat, buf, sizeof(argcompat)))
80402+ return -EFAULT;
80403+
80404+ arg->role_db.r_table = compat_ptr(argcompat.role_db.r_table);
80405+ arg->role_db.num_pointers = argcompat.role_db.num_pointers;
80406+ arg->role_db.num_roles = argcompat.role_db.num_roles;
80407+ arg->role_db.num_domain_children = argcompat.role_db.num_domain_children;
80408+ arg->role_db.num_subjects = argcompat.role_db.num_subjects;
80409+ arg->role_db.num_objects = argcompat.role_db.num_objects;
80410+
80411+ memcpy(&arg->pw, &argcompat.pw, sizeof(arg->pw));
80412+ memcpy(&arg->salt, &argcompat.salt, sizeof(arg->salt));
80413+ memcpy(&arg->sum, &argcompat.sum, sizeof(arg->sum));
80414+ memcpy(&arg->sp_role, &argcompat.sp_role, sizeof(arg->sp_role));
80415+ arg->sprole_pws = compat_ptr(argcompat.sprole_pws);
80416+ arg->segv_device = argcompat.segv_device;
80417+ arg->segv_inode = argcompat.segv_inode;
80418+ arg->segv_uid = argcompat.segv_uid;
80419+ arg->num_sprole_pws = argcompat.num_sprole_pws;
80420+ arg->mode = argcompat.mode;
80421+
80422+ return 0;
80423+}
80424+
80425+int copy_acl_object_label_compat(struct acl_object_label *obj, const struct acl_object_label *userp)
80426+{
80427+ struct acl_object_label_compat objcompat;
80428+
80429+ if (copy_from_user(&objcompat, userp, sizeof(objcompat)))
80430+ return -EFAULT;
80431+
80432+ obj->filename = compat_ptr(objcompat.filename);
80433+ obj->inode = objcompat.inode;
80434+ obj->device = objcompat.device;
80435+ obj->mode = objcompat.mode;
80436+
80437+ obj->nested = compat_ptr(objcompat.nested);
80438+ obj->globbed = compat_ptr(objcompat.globbed);
80439+
80440+ obj->prev = compat_ptr(objcompat.prev);
80441+ obj->next = compat_ptr(objcompat.next);
80442+
80443+ return 0;
80444+}
80445+
80446+int copy_acl_subject_label_compat(struct acl_subject_label *subj, const struct acl_subject_label *userp)
80447+{
80448+ unsigned int i;
80449+ struct acl_subject_label_compat subjcompat;
80450+
80451+ if (copy_from_user(&subjcompat, userp, sizeof(subjcompat)))
80452+ return -EFAULT;
80453+
80454+ subj->filename = compat_ptr(subjcompat.filename);
80455+ subj->inode = subjcompat.inode;
80456+ subj->device = subjcompat.device;
80457+ subj->mode = subjcompat.mode;
80458+ subj->cap_mask = subjcompat.cap_mask;
80459+ subj->cap_lower = subjcompat.cap_lower;
80460+ subj->cap_invert_audit = subjcompat.cap_invert_audit;
80461+
80462+ for (i = 0; i < GR_NLIMITS; i++) {
80463+ if (subjcompat.res[i].rlim_cur == COMPAT_RLIM_INFINITY)
80464+ subj->res[i].rlim_cur = RLIM_INFINITY;
80465+ else
80466+ subj->res[i].rlim_cur = subjcompat.res[i].rlim_cur;
80467+ if (subjcompat.res[i].rlim_max == COMPAT_RLIM_INFINITY)
80468+ subj->res[i].rlim_max = RLIM_INFINITY;
80469+ else
80470+ subj->res[i].rlim_max = subjcompat.res[i].rlim_max;
80471+ }
80472+ subj->resmask = subjcompat.resmask;
80473+
80474+ subj->user_trans_type = subjcompat.user_trans_type;
80475+ subj->group_trans_type = subjcompat.group_trans_type;
80476+ subj->user_transitions = compat_ptr(subjcompat.user_transitions);
80477+ subj->group_transitions = compat_ptr(subjcompat.group_transitions);
80478+ subj->user_trans_num = subjcompat.user_trans_num;
80479+ subj->group_trans_num = subjcompat.group_trans_num;
80480+
80481+ memcpy(&subj->sock_families, &subjcompat.sock_families, sizeof(subj->sock_families));
80482+ memcpy(&subj->ip_proto, &subjcompat.ip_proto, sizeof(subj->ip_proto));
80483+ subj->ip_type = subjcompat.ip_type;
80484+ subj->ips = compat_ptr(subjcompat.ips);
80485+ subj->ip_num = subjcompat.ip_num;
80486+ subj->inaddr_any_override = subjcompat.inaddr_any_override;
80487+
80488+ subj->crashes = subjcompat.crashes;
80489+ subj->expires = subjcompat.expires;
80490+
80491+ subj->parent_subject = compat_ptr(subjcompat.parent_subject);
80492+ subj->hash = compat_ptr(subjcompat.hash);
80493+ subj->prev = compat_ptr(subjcompat.prev);
80494+ subj->next = compat_ptr(subjcompat.next);
80495+
80496+ subj->obj_hash = compat_ptr(subjcompat.obj_hash);
80497+ subj->obj_hash_size = subjcompat.obj_hash_size;
80498+ subj->pax_flags = subjcompat.pax_flags;
80499+
80500+ return 0;
80501+}
80502+
80503+int copy_acl_role_label_compat(struct acl_role_label *role, const struct acl_role_label *userp)
80504+{
80505+ struct acl_role_label_compat rolecompat;
80506+
80507+ if (copy_from_user(&rolecompat, userp, sizeof(rolecompat)))
80508+ return -EFAULT;
80509+
80510+ role->rolename = compat_ptr(rolecompat.rolename);
80511+ role->uidgid = rolecompat.uidgid;
80512+ role->roletype = rolecompat.roletype;
80513+
80514+ role->auth_attempts = rolecompat.auth_attempts;
80515+ role->expires = rolecompat.expires;
80516+
80517+ role->root_label = compat_ptr(rolecompat.root_label);
80518+ role->hash = compat_ptr(rolecompat.hash);
80519+
80520+ role->prev = compat_ptr(rolecompat.prev);
80521+ role->next = compat_ptr(rolecompat.next);
80522+
80523+ role->transitions = compat_ptr(rolecompat.transitions);
80524+ role->allowed_ips = compat_ptr(rolecompat.allowed_ips);
80525+ role->domain_children = compat_ptr(rolecompat.domain_children);
80526+ role->domain_child_num = rolecompat.domain_child_num;
80527+
80528+ role->umask = rolecompat.umask;
80529+
80530+ role->subj_hash = compat_ptr(rolecompat.subj_hash);
80531+ role->subj_hash_size = rolecompat.subj_hash_size;
80532+
80533+ return 0;
80534+}
80535+
80536+int copy_role_allowed_ip_compat(struct role_allowed_ip *roleip, const struct role_allowed_ip *userp)
80537+{
80538+ struct role_allowed_ip_compat roleip_compat;
80539+
80540+ if (copy_from_user(&roleip_compat, userp, sizeof(roleip_compat)))
80541+ return -EFAULT;
80542+
80543+ roleip->addr = roleip_compat.addr;
80544+ roleip->netmask = roleip_compat.netmask;
80545+
80546+ roleip->prev = compat_ptr(roleip_compat.prev);
80547+ roleip->next = compat_ptr(roleip_compat.next);
80548+
80549+ return 0;
80550+}
80551+
80552+int copy_role_transition_compat(struct role_transition *trans, const struct role_transition *userp)
80553+{
80554+ struct role_transition_compat trans_compat;
80555+
80556+ if (copy_from_user(&trans_compat, userp, sizeof(trans_compat)))
80557+ return -EFAULT;
80558+
80559+ trans->rolename = compat_ptr(trans_compat.rolename);
80560+
80561+ trans->prev = compat_ptr(trans_compat.prev);
80562+ trans->next = compat_ptr(trans_compat.next);
80563+
80564+ return 0;
80565+
80566+}
80567+
80568+int copy_gr_hash_struct_compat(struct gr_hash_struct *hash, const struct gr_hash_struct *userp)
80569+{
80570+ struct gr_hash_struct_compat hash_compat;
80571+
80572+ if (copy_from_user(&hash_compat, userp, sizeof(hash_compat)))
80573+ return -EFAULT;
80574+
80575+ hash->table = compat_ptr(hash_compat.table);
80576+ hash->nametable = compat_ptr(hash_compat.nametable);
80577+ hash->first = compat_ptr(hash_compat.first);
80578+
80579+ hash->table_size = hash_compat.table_size;
80580+ hash->used_size = hash_compat.used_size;
80581+
80582+ hash->type = hash_compat.type;
80583+
80584+ return 0;
80585+}
80586+
80587+int copy_pointer_from_array_compat(void *ptr, unsigned long idx, const void *userp)
80588+{
80589+ compat_uptr_t ptrcompat;
80590+
80591+ if (copy_from_user(&ptrcompat, userp + (idx * sizeof(ptrcompat)), sizeof(ptrcompat)))
80592+ return -EFAULT;
80593+
80594+ *(void **)ptr = compat_ptr(ptrcompat);
80595+
80596+ return 0;
80597+}
80598+
80599+int copy_acl_ip_label_compat(struct acl_ip_label *ip, const struct acl_ip_label *userp)
80600+{
80601+ struct acl_ip_label_compat ip_compat;
80602+
80603+ if (copy_from_user(&ip_compat, userp, sizeof(ip_compat)))
80604+ return -EFAULT;
80605+
80606+ ip->iface = compat_ptr(ip_compat.iface);
80607+ ip->addr = ip_compat.addr;
80608+ ip->netmask = ip_compat.netmask;
80609+ ip->low = ip_compat.low;
80610+ ip->high = ip_compat.high;
80611+ ip->mode = ip_compat.mode;
80612+ ip->type = ip_compat.type;
80613+
80614+ memcpy(&ip->proto, &ip_compat.proto, sizeof(ip->proto));
80615+
80616+ ip->prev = compat_ptr(ip_compat.prev);
80617+ ip->next = compat_ptr(ip_compat.next);
80618+
80619+ return 0;
80620+}
80621+
80622+int copy_sprole_pw_compat(struct sprole_pw *pw, unsigned long idx, const struct sprole_pw *userp)
80623+{
80624+ struct sprole_pw_compat pw_compat;
80625+
80626+ if (copy_from_user(&pw_compat, (const void *)userp + (sizeof(pw_compat) * idx), sizeof(pw_compat)))
80627+ return -EFAULT;
80628+
80629+ pw->rolename = compat_ptr(pw_compat.rolename);
80630+ memcpy(&pw->salt, pw_compat.salt, sizeof(pw->salt));
80631+ memcpy(&pw->sum, pw_compat.sum, sizeof(pw->sum));
80632+
80633+ return 0;
80634+}
80635+
80636+size_t get_gr_arg_wrapper_size_compat(void)
80637+{
80638+ return sizeof(struct gr_arg_wrapper_compat);
80639+}
80640+
80641diff --git a/grsecurity/gracl_fs.c b/grsecurity/gracl_fs.c
80642new file mode 100644
80643index 0000000..8ee8e4f
80644--- /dev/null
80645+++ b/grsecurity/gracl_fs.c
80646@@ -0,0 +1,447 @@
80647+#include <linux/kernel.h>
80648+#include <linux/sched.h>
80649+#include <linux/types.h>
80650+#include <linux/fs.h>
80651+#include <linux/file.h>
80652+#include <linux/stat.h>
80653+#include <linux/grsecurity.h>
80654+#include <linux/grinternal.h>
80655+#include <linux/gracl.h>
80656+
80657+umode_t
80658+gr_acl_umask(void)
80659+{
80660+ if (unlikely(!gr_acl_is_enabled()))
80661+ return 0;
80662+
80663+ return current->role->umask;
80664+}
80665+
80666+__u32
80667+gr_acl_handle_hidden_file(const struct dentry * dentry,
80668+ const struct vfsmount * mnt)
80669+{
80670+ __u32 mode;
80671+
80672+ if (unlikely(d_is_negative(dentry)))
80673+ return GR_FIND;
80674+
80675+ mode =
80676+ gr_search_file(dentry, GR_FIND | GR_AUDIT_FIND | GR_SUPPRESS, mnt);
80677+
80678+ if (unlikely(mode & GR_FIND && mode & GR_AUDIT_FIND)) {
80679+ gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
80680+ return mode;
80681+ } else if (unlikely(!(mode & GR_FIND) && !(mode & GR_SUPPRESS))) {
80682+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
80683+ return 0;
80684+ } else if (unlikely(!(mode & GR_FIND)))
80685+ return 0;
80686+
80687+ return GR_FIND;
80688+}
80689+
80690+__u32
80691+gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
80692+ int acc_mode)
80693+{
80694+ __u32 reqmode = GR_FIND;
80695+ __u32 mode;
80696+
80697+ if (unlikely(d_is_negative(dentry)))
80698+ return reqmode;
80699+
80700+ if (acc_mode & MAY_APPEND)
80701+ reqmode |= GR_APPEND;
80702+ else if (acc_mode & MAY_WRITE)
80703+ reqmode |= GR_WRITE;
80704+ if ((acc_mode & MAY_READ) && !S_ISDIR(dentry->d_inode->i_mode))
80705+ reqmode |= GR_READ;
80706+
80707+ mode =
80708+ gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
80709+ mnt);
80710+
80711+ if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
80712+ gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
80713+ reqmode & GR_READ ? " reading" : "",
80714+ reqmode & GR_WRITE ? " writing" : reqmode &
80715+ GR_APPEND ? " appending" : "");
80716+ return reqmode;
80717+ } else
80718+ if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
80719+ {
80720+ gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
80721+ reqmode & GR_READ ? " reading" : "",
80722+ reqmode & GR_WRITE ? " writing" : reqmode &
80723+ GR_APPEND ? " appending" : "");
80724+ return 0;
80725+ } else if (unlikely((mode & reqmode) != reqmode))
80726+ return 0;
80727+
80728+ return reqmode;
80729+}
80730+
80731+__u32
80732+gr_acl_handle_creat(const struct dentry * dentry,
80733+ const struct dentry * p_dentry,
80734+ const struct vfsmount * p_mnt, int open_flags, int acc_mode,
80735+ const int imode)
80736+{
80737+ __u32 reqmode = GR_WRITE | GR_CREATE;
80738+ __u32 mode;
80739+
80740+ if (acc_mode & MAY_APPEND)
80741+ reqmode |= GR_APPEND;
80742+ // if a directory was required or the directory already exists, then
80743+ // don't count this open as a read
80744+ if ((acc_mode & MAY_READ) &&
80745+ !((open_flags & O_DIRECTORY) || (dentry->d_inode && S_ISDIR(dentry->d_inode->i_mode))))
80746+ reqmode |= GR_READ;
80747+ if ((open_flags & O_CREAT) &&
80748+ ((imode & S_ISUID) || ((imode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))))
80749+ reqmode |= GR_SETID;
80750+
80751+ mode =
80752+ gr_check_create(dentry, p_dentry, p_mnt,
80753+ reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
80754+
80755+ if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
80756+ gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
80757+ reqmode & GR_READ ? " reading" : "",
80758+ reqmode & GR_WRITE ? " writing" : reqmode &
80759+ GR_APPEND ? " appending" : "");
80760+ return reqmode;
80761+ } else
80762+ if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
80763+ {
80764+ gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
80765+ reqmode & GR_READ ? " reading" : "",
80766+ reqmode & GR_WRITE ? " writing" : reqmode &
80767+ GR_APPEND ? " appending" : "");
80768+ return 0;
80769+ } else if (unlikely((mode & reqmode) != reqmode))
80770+ return 0;
80771+
80772+ return reqmode;
80773+}
80774+
80775+__u32
80776+gr_acl_handle_access(const struct dentry * dentry, const struct vfsmount * mnt,
80777+ const int fmode)
80778+{
80779+ __u32 mode, reqmode = GR_FIND;
80780+
80781+ if ((fmode & S_IXOTH) && !S_ISDIR(dentry->d_inode->i_mode))
80782+ reqmode |= GR_EXEC;
80783+ if (fmode & S_IWOTH)
80784+ reqmode |= GR_WRITE;
80785+ if (fmode & S_IROTH)
80786+ reqmode |= GR_READ;
80787+
80788+ mode =
80789+ gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
80790+ mnt);
80791+
80792+ if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
80793+ gr_log_fs_rbac_mode3(GR_DO_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
80794+ reqmode & GR_READ ? " reading" : "",
80795+ reqmode & GR_WRITE ? " writing" : "",
80796+ reqmode & GR_EXEC ? " executing" : "");
80797+ return reqmode;
80798+ } else
80799+ if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
80800+ {
80801+ gr_log_fs_rbac_mode3(GR_DONT_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
80802+ reqmode & GR_READ ? " reading" : "",
80803+ reqmode & GR_WRITE ? " writing" : "",
80804+ reqmode & GR_EXEC ? " executing" : "");
80805+ return 0;
80806+ } else if (unlikely((mode & reqmode) != reqmode))
80807+ return 0;
80808+
80809+ return reqmode;
80810+}
80811+
80812+static __u32 generic_fs_handler(const struct dentry *dentry, const struct vfsmount *mnt, __u32 reqmode, const char *fmt)
80813+{
80814+ __u32 mode;
80815+
80816+ mode = gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS, mnt);
80817+
80818+ if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
80819+ gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, dentry, mnt);
80820+ return mode;
80821+ } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
80822+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, dentry, mnt);
80823+ return 0;
80824+ } else if (unlikely((mode & (reqmode)) != (reqmode)))
80825+ return 0;
80826+
80827+ return (reqmode);
80828+}
80829+
80830+__u32
80831+gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
80832+{
80833+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_RMDIR_ACL_MSG);
80834+}
80835+
80836+__u32
80837+gr_acl_handle_unlink(const struct dentry *dentry, const struct vfsmount *mnt)
80838+{
80839+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_UNLINK_ACL_MSG);
80840+}
80841+
80842+__u32
80843+gr_acl_handle_truncate(const struct dentry *dentry, const struct vfsmount *mnt)
80844+{
80845+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_TRUNCATE_ACL_MSG);
80846+}
80847+
80848+__u32
80849+gr_acl_handle_utime(const struct dentry *dentry, const struct vfsmount *mnt)
80850+{
80851+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_ATIME_ACL_MSG);
80852+}
80853+
80854+__u32
80855+gr_acl_handle_chmod(const struct dentry *dentry, const struct vfsmount *mnt,
80856+ umode_t *modeptr)
80857+{
80858+ umode_t mode;
80859+
80860+ *modeptr &= ~gr_acl_umask();
80861+ mode = *modeptr;
80862+
80863+ if (unlikely(dentry->d_inode && S_ISSOCK(dentry->d_inode->i_mode)))
80864+ return 1;
80865+
80866+ if (unlikely(dentry->d_inode && !S_ISDIR(dentry->d_inode->i_mode) &&
80867+ ((mode & S_ISUID) || ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))))) {
80868+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_SETID,
80869+ GR_CHMOD_ACL_MSG);
80870+ } else {
80871+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHMOD_ACL_MSG);
80872+ }
80873+}
80874+
80875+__u32
80876+gr_acl_handle_chown(const struct dentry *dentry, const struct vfsmount *mnt)
80877+{
80878+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHOWN_ACL_MSG);
80879+}
80880+
80881+__u32
80882+gr_acl_handle_setxattr(const struct dentry *dentry, const struct vfsmount *mnt)
80883+{
80884+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_SETXATTR_ACL_MSG);
80885+}
80886+
80887+__u32
80888+gr_acl_handle_removexattr(const struct dentry *dentry, const struct vfsmount *mnt)
80889+{
80890+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_REMOVEXATTR_ACL_MSG);
80891+}
80892+
80893+__u32
80894+gr_acl_handle_execve(const struct dentry *dentry, const struct vfsmount *mnt)
80895+{
80896+ return generic_fs_handler(dentry, mnt, GR_EXEC, GR_EXEC_ACL_MSG);
80897+}
80898+
80899+__u32
80900+gr_acl_handle_unix(const struct dentry *dentry, const struct vfsmount *mnt)
80901+{
80902+ return generic_fs_handler(dentry, mnt, GR_READ | GR_WRITE,
80903+ GR_UNIXCONNECT_ACL_MSG);
80904+}
80905+
80906+/* hardlinks require at minimum create and link permission,
80907+ any additional privilege required is based on the
80908+ privilege of the file being linked to
80909+*/
80910+__u32
80911+gr_acl_handle_link(const struct dentry * new_dentry,
80912+ const struct dentry * parent_dentry,
80913+ const struct vfsmount * parent_mnt,
80914+ const struct dentry * old_dentry,
80915+ const struct vfsmount * old_mnt, const struct filename *to)
80916+{
80917+ __u32 mode;
80918+ __u32 needmode = GR_CREATE | GR_LINK;
80919+ __u32 needaudit = GR_AUDIT_CREATE | GR_AUDIT_LINK;
80920+
80921+ mode =
80922+ gr_check_link(new_dentry, parent_dentry, parent_mnt, old_dentry,
80923+ old_mnt);
80924+
80925+ if (unlikely(((mode & needmode) == needmode) && (mode & needaudit))) {
80926+ gr_log_fs_rbac_str(GR_DO_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to->name);
80927+ return mode;
80928+ } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
80929+ gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to->name);
80930+ return 0;
80931+ } else if (unlikely((mode & needmode) != needmode))
80932+ return 0;
80933+
80934+ return 1;
80935+}
80936+
80937+__u32
80938+gr_acl_handle_symlink(const struct dentry * new_dentry,
80939+ const struct dentry * parent_dentry,
80940+ const struct vfsmount * parent_mnt, const struct filename *from)
80941+{
80942+ __u32 needmode = GR_WRITE | GR_CREATE;
80943+ __u32 mode;
80944+
80945+ mode =
80946+ gr_check_create(new_dentry, parent_dentry, parent_mnt,
80947+ GR_CREATE | GR_AUDIT_CREATE |
80948+ GR_WRITE | GR_AUDIT_WRITE | GR_SUPPRESS);
80949+
80950+ if (unlikely(mode & GR_WRITE && mode & GR_AUDITS)) {
80951+ gr_log_fs_str_rbac(GR_DO_AUDIT, GR_SYMLINK_ACL_MSG, from->name, new_dentry, parent_mnt);
80952+ return mode;
80953+ } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
80954+ gr_log_fs_str_rbac(GR_DONT_AUDIT, GR_SYMLINK_ACL_MSG, from->name, new_dentry, parent_mnt);
80955+ return 0;
80956+ } else if (unlikely((mode & needmode) != needmode))
80957+ return 0;
80958+
80959+ return (GR_WRITE | GR_CREATE);
80960+}
80961+
80962+static __u32 generic_fs_create_handler(const struct dentry *new_dentry, const struct dentry *parent_dentry, const struct vfsmount *parent_mnt, __u32 reqmode, const char *fmt)
80963+{
80964+ __u32 mode;
80965+
80966+ mode = gr_check_create(new_dentry, parent_dentry, parent_mnt, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
80967+
80968+ if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
80969+ gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, new_dentry, parent_mnt);
80970+ return mode;
80971+ } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
80972+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, new_dentry, parent_mnt);
80973+ return 0;
80974+ } else if (unlikely((mode & (reqmode)) != (reqmode)))
80975+ return 0;
80976+
80977+ return (reqmode);
80978+}
80979+
80980+__u32
80981+gr_acl_handle_mknod(const struct dentry * new_dentry,
80982+ const struct dentry * parent_dentry,
80983+ const struct vfsmount * parent_mnt,
80984+ const int mode)
80985+{
80986+ __u32 reqmode = GR_WRITE | GR_CREATE;
80987+ if (unlikely((mode & S_ISUID) || ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))))
80988+ reqmode |= GR_SETID;
80989+
80990+ return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
80991+ reqmode, GR_MKNOD_ACL_MSG);
80992+}
80993+
80994+__u32
80995+gr_acl_handle_mkdir(const struct dentry *new_dentry,
80996+ const struct dentry *parent_dentry,
80997+ const struct vfsmount *parent_mnt)
80998+{
80999+ return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
81000+ GR_WRITE | GR_CREATE, GR_MKDIR_ACL_MSG);
81001+}
81002+
81003+#define RENAME_CHECK_SUCCESS(old, new) \
81004+ (((old & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)) && \
81005+ ((new & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)))
81006+
81007+int
81008+gr_acl_handle_rename(struct dentry *new_dentry,
81009+ struct dentry *parent_dentry,
81010+ const struct vfsmount *parent_mnt,
81011+ struct dentry *old_dentry,
81012+ struct inode *old_parent_inode,
81013+ struct vfsmount *old_mnt, const struct filename *newname, unsigned int flags)
81014+{
81015+ __u32 comp1, comp2;
81016+ int error = 0;
81017+
81018+ if (unlikely(!gr_acl_is_enabled()))
81019+ return 0;
81020+
81021+ if (flags & RENAME_EXCHANGE) {
81022+ comp1 = gr_search_file(new_dentry, GR_READ | GR_WRITE |
81023+ GR_AUDIT_READ | GR_AUDIT_WRITE |
81024+ GR_SUPPRESS, parent_mnt);
81025+ comp2 =
81026+ gr_search_file(old_dentry,
81027+ GR_READ | GR_WRITE | GR_AUDIT_READ |
81028+ GR_AUDIT_WRITE | GR_SUPPRESS, old_mnt);
81029+ } else if (d_is_negative(new_dentry)) {
81030+ comp1 = gr_check_create(new_dentry, parent_dentry, parent_mnt,
81031+ GR_READ | GR_WRITE | GR_CREATE | GR_AUDIT_READ |
81032+ GR_AUDIT_WRITE | GR_AUDIT_CREATE | GR_SUPPRESS);
81033+ comp2 = gr_search_file(old_dentry, GR_READ | GR_WRITE |
81034+ GR_DELETE | GR_AUDIT_DELETE |
81035+ GR_AUDIT_READ | GR_AUDIT_WRITE |
81036+ GR_SUPPRESS, old_mnt);
81037+ } else {
81038+ comp1 = gr_search_file(new_dentry, GR_READ | GR_WRITE |
81039+ GR_CREATE | GR_DELETE |
81040+ GR_AUDIT_CREATE | GR_AUDIT_DELETE |
81041+ GR_AUDIT_READ | GR_AUDIT_WRITE |
81042+ GR_SUPPRESS, parent_mnt);
81043+ comp2 =
81044+ gr_search_file(old_dentry,
81045+ GR_READ | GR_WRITE | GR_AUDIT_READ |
81046+ GR_DELETE | GR_AUDIT_DELETE |
81047+ GR_AUDIT_WRITE | GR_SUPPRESS, old_mnt);
81048+ }
81049+
81050+ if (RENAME_CHECK_SUCCESS(comp1, comp2) &&
81051+ ((comp1 & GR_AUDITS) || (comp2 & GR_AUDITS)))
81052+ gr_log_fs_rbac_str(GR_DO_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname->name);
81053+ else if (!RENAME_CHECK_SUCCESS(comp1, comp2) && !(comp1 & GR_SUPPRESS)
81054+ && !(comp2 & GR_SUPPRESS)) {
81055+ gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname->name);
81056+ error = -EACCES;
81057+ } else if (unlikely(!RENAME_CHECK_SUCCESS(comp1, comp2)))
81058+ error = -EACCES;
81059+
81060+ return error;
81061+}
81062+
81063+void
81064+gr_acl_handle_exit(void)
81065+{
81066+ u16 id;
81067+ char *rolename;
81068+
81069+ if (unlikely(current->acl_sp_role && gr_acl_is_enabled() &&
81070+ !(current->role->roletype & GR_ROLE_PERSIST))) {
81071+ id = current->acl_role_id;
81072+ rolename = current->role->rolename;
81073+ gr_set_acls(1);
81074+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLEL_ACL_MSG, rolename, id);
81075+ }
81076+
81077+ gr_put_exec_file(current);
81078+ return;
81079+}
81080+
81081+int
81082+gr_acl_handle_procpidmem(const struct task_struct *task)
81083+{
81084+ if (unlikely(!gr_acl_is_enabled()))
81085+ return 0;
81086+
81087+ if (task != current && (task->acl->mode & GR_PROTPROCFD) &&
81088+ !(current->acl->mode & GR_POVERRIDE) &&
81089+ !(current->role->roletype & GR_ROLE_GOD))
81090+ return -EACCES;
81091+
81092+ return 0;
81093+}
81094diff --git a/grsecurity/gracl_ip.c b/grsecurity/gracl_ip.c
81095new file mode 100644
81096index 0000000..f056b81
81097--- /dev/null
81098+++ b/grsecurity/gracl_ip.c
81099@@ -0,0 +1,386 @@
81100+#include <linux/kernel.h>
81101+#include <asm/uaccess.h>
81102+#include <asm/errno.h>
81103+#include <net/sock.h>
81104+#include <linux/file.h>
81105+#include <linux/fs.h>
81106+#include <linux/net.h>
81107+#include <linux/in.h>
81108+#include <linux/skbuff.h>
81109+#include <linux/ip.h>
81110+#include <linux/udp.h>
81111+#include <linux/types.h>
81112+#include <linux/sched.h>
81113+#include <linux/netdevice.h>
81114+#include <linux/inetdevice.h>
81115+#include <linux/gracl.h>
81116+#include <linux/grsecurity.h>
81117+#include <linux/grinternal.h>
81118+
81119+#define GR_BIND 0x01
81120+#define GR_CONNECT 0x02
81121+#define GR_INVERT 0x04
81122+#define GR_BINDOVERRIDE 0x08
81123+#define GR_CONNECTOVERRIDE 0x10
81124+#define GR_SOCK_FAMILY 0x20
81125+
81126+static const char * gr_protocols[IPPROTO_MAX] = {
81127+ "ip", "icmp", "igmp", "ggp", "ipencap", "st", "tcp", "cbt",
81128+ "egp", "igp", "bbn-rcc", "nvp", "pup", "argus", "emcon", "xnet",
81129+ "chaos", "udp", "mux", "dcn", "hmp", "prm", "xns-idp", "trunk-1",
81130+ "trunk-2", "leaf-1", "leaf-2", "rdp", "irtp", "iso-tp4", "netblt", "mfe-nsp",
81131+ "merit-inp", "sep", "3pc", "idpr", "xtp", "ddp", "idpr-cmtp", "tp++",
81132+ "il", "ipv6", "sdrp", "ipv6-route", "ipv6-frag", "idrp", "rsvp", "gre",
81133+ "mhrp", "bna", "ipv6-crypt", "ipv6-auth", "i-nlsp", "swipe", "narp", "mobile",
81134+ "tlsp", "skip", "ipv6-icmp", "ipv6-nonxt", "ipv6-opts", "unknown:61", "cftp", "unknown:63",
81135+ "sat-expak", "kryptolan", "rvd", "ippc", "unknown:68", "sat-mon", "visa", "ipcv",
81136+ "cpnx", "cphb", "wsn", "pvp", "br-sat-mon", "sun-nd", "wb-mon", "wb-expak",
81137+ "iso-ip", "vmtp", "secure-vmtp", "vines", "ttp", "nfsnet-igp", "dgp", "tcf",
81138+ "eigrp", "ospf", "sprite-rpc", "larp", "mtp", "ax.25", "ipip", "micp",
81139+ "scc-sp", "etherip", "encap", "unknown:99", "gmtp", "ifmp", "pnni", "pim",
81140+ "aris", "scps", "qnx", "a/n", "ipcomp", "snp", "compaq-peer", "ipx-in-ip",
81141+ "vrrp", "pgm", "unknown:114", "l2tp", "ddx", "iatp", "stp", "srp",
81142+ "uti", "smp", "sm", "ptp", "isis", "fire", "crtp", "crdup",
81143+ "sscopmce", "iplt", "sps", "pipe", "sctp", "fc", "unkown:134", "unknown:135",
81144+ "unknown:136", "unknown:137", "unknown:138", "unknown:139", "unknown:140", "unknown:141", "unknown:142", "unknown:143",
81145+ "unknown:144", "unknown:145", "unknown:146", "unknown:147", "unknown:148", "unknown:149", "unknown:150", "unknown:151",
81146+ "unknown:152", "unknown:153", "unknown:154", "unknown:155", "unknown:156", "unknown:157", "unknown:158", "unknown:159",
81147+ "unknown:160", "unknown:161", "unknown:162", "unknown:163", "unknown:164", "unknown:165", "unknown:166", "unknown:167",
81148+ "unknown:168", "unknown:169", "unknown:170", "unknown:171", "unknown:172", "unknown:173", "unknown:174", "unknown:175",
81149+ "unknown:176", "unknown:177", "unknown:178", "unknown:179", "unknown:180", "unknown:181", "unknown:182", "unknown:183",
81150+ "unknown:184", "unknown:185", "unknown:186", "unknown:187", "unknown:188", "unknown:189", "unknown:190", "unknown:191",
81151+ "unknown:192", "unknown:193", "unknown:194", "unknown:195", "unknown:196", "unknown:197", "unknown:198", "unknown:199",
81152+ "unknown:200", "unknown:201", "unknown:202", "unknown:203", "unknown:204", "unknown:205", "unknown:206", "unknown:207",
81153+ "unknown:208", "unknown:209", "unknown:210", "unknown:211", "unknown:212", "unknown:213", "unknown:214", "unknown:215",
81154+ "unknown:216", "unknown:217", "unknown:218", "unknown:219", "unknown:220", "unknown:221", "unknown:222", "unknown:223",
81155+ "unknown:224", "unknown:225", "unknown:226", "unknown:227", "unknown:228", "unknown:229", "unknown:230", "unknown:231",
81156+ "unknown:232", "unknown:233", "unknown:234", "unknown:235", "unknown:236", "unknown:237", "unknown:238", "unknown:239",
81157+ "unknown:240", "unknown:241", "unknown:242", "unknown:243", "unknown:244", "unknown:245", "unknown:246", "unknown:247",
81158+ "unknown:248", "unknown:249", "unknown:250", "unknown:251", "unknown:252", "unknown:253", "unknown:254", "unknown:255",
81159+ };
81160+
81161+static const char * gr_socktypes[SOCK_MAX] = {
81162+ "unknown:0", "stream", "dgram", "raw", "rdm", "seqpacket", "unknown:6",
81163+ "unknown:7", "unknown:8", "unknown:9", "packet"
81164+ };
81165+
81166+static const char * gr_sockfamilies[AF_MAX+1] = {
81167+ "unspec", "unix", "inet", "ax25", "ipx", "appletalk", "netrom", "bridge", "atmpvc", "x25",
81168+ "inet6", "rose", "decnet", "netbeui", "security", "key", "netlink", "packet", "ash",
81169+ "econet", "atmsvc", "rds", "sna", "irda", "ppox", "wanpipe", "llc", "fam_27", "fam_28",
81170+ "tipc", "bluetooth", "iucv", "rxrpc", "isdn", "phonet", "ieee802154", "ciaf"
81171+ };
81172+
81173+const char *
81174+gr_proto_to_name(unsigned char proto)
81175+{
81176+ return gr_protocols[proto];
81177+}
81178+
81179+const char *
81180+gr_socktype_to_name(unsigned char type)
81181+{
81182+ return gr_socktypes[type];
81183+}
81184+
81185+const char *
81186+gr_sockfamily_to_name(unsigned char family)
81187+{
81188+ return gr_sockfamilies[family];
81189+}
81190+
81191+extern const struct net_proto_family __rcu *net_families[NPROTO] __read_mostly;
81192+
81193+int
81194+gr_search_socket(const int domain, const int type, const int protocol)
81195+{
81196+ struct acl_subject_label *curr;
81197+ const struct cred *cred = current_cred();
81198+
81199+ if (unlikely(!gr_acl_is_enabled()))
81200+ goto exit;
81201+
81202+ if ((domain < 0) || (type < 0) || (protocol < 0) ||
81203+ (domain >= AF_MAX) || (type >= SOCK_MAX) || (protocol >= IPPROTO_MAX))
81204+ goto exit; // let the kernel handle it
81205+
81206+ curr = current->acl;
81207+
81208+ if (curr->sock_families[domain / 32] & (1U << (domain % 32))) {
81209+ /* the family is allowed, if this is PF_INET allow it only if
81210+ the extra sock type/protocol checks pass */
81211+ if (domain == PF_INET)
81212+ goto inet_check;
81213+ goto exit;
81214+ } else {
81215+ if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
81216+ __u32 fakeip = 0;
81217+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
81218+ current->role->roletype, GR_GLOBAL_UID(cred->uid),
81219+ GR_GLOBAL_GID(cred->gid), current->exec_file ?
81220+ gr_to_filename(current->exec_file->f_path.dentry,
81221+ current->exec_file->f_path.mnt) :
81222+ curr->filename, curr->filename,
81223+ &fakeip, domain, 0, 0, GR_SOCK_FAMILY,
81224+ &current->signal->saved_ip);
81225+ goto exit;
81226+ }
81227+ goto exit_fail;
81228+ }
81229+
81230+inet_check:
81231+ /* the rest of this checking is for IPv4 only */
81232+ if (!curr->ips)
81233+ goto exit;
81234+
81235+ if ((curr->ip_type & (1U << type)) &&
81236+ (curr->ip_proto[protocol / 32] & (1U << (protocol % 32))))
81237+ goto exit;
81238+
81239+ if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
81240+ /* we don't place acls on raw sockets , and sometimes
81241+ dgram/ip sockets are opened for ioctl and not
81242+ bind/connect, so we'll fake a bind learn log */
81243+ if (type == SOCK_RAW || type == SOCK_PACKET) {
81244+ __u32 fakeip = 0;
81245+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
81246+ current->role->roletype, GR_GLOBAL_UID(cred->uid),
81247+ GR_GLOBAL_GID(cred->gid), current->exec_file ?
81248+ gr_to_filename(current->exec_file->f_path.dentry,
81249+ current->exec_file->f_path.mnt) :
81250+ curr->filename, curr->filename,
81251+ &fakeip, 0, type,
81252+ protocol, GR_CONNECT, &current->signal->saved_ip);
81253+ } else if ((type == SOCK_DGRAM) && (protocol == IPPROTO_IP)) {
81254+ __u32 fakeip = 0;
81255+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
81256+ current->role->roletype, GR_GLOBAL_UID(cred->uid),
81257+ GR_GLOBAL_GID(cred->gid), current->exec_file ?
81258+ gr_to_filename(current->exec_file->f_path.dentry,
81259+ current->exec_file->f_path.mnt) :
81260+ curr->filename, curr->filename,
81261+ &fakeip, 0, type,
81262+ protocol, GR_BIND, &current->signal->saved_ip);
81263+ }
81264+ /* we'll log when they use connect or bind */
81265+ goto exit;
81266+ }
81267+
81268+exit_fail:
81269+ if (domain == PF_INET)
81270+ gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(domain),
81271+ gr_socktype_to_name(type), gr_proto_to_name(protocol));
81272+ else if (rcu_access_pointer(net_families[domain]) != NULL)
81273+ gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(domain),
81274+ gr_socktype_to_name(type), protocol);
81275+
81276+ return 0;
81277+exit:
81278+ return 1;
81279+}
81280+
81281+int check_ip_policy(struct acl_ip_label *ip, __u32 ip_addr, __u16 ip_port, __u8 protocol, const int mode, const int type, __u32 our_addr, __u32 our_netmask)
81282+{
81283+ if ((ip->mode & mode) &&
81284+ (ip_port >= ip->low) &&
81285+ (ip_port <= ip->high) &&
81286+ ((ntohl(ip_addr) & our_netmask) ==
81287+ (ntohl(our_addr) & our_netmask))
81288+ && (ip->proto[protocol / 32] & (1U << (protocol % 32)))
81289+ && (ip->type & (1U << type))) {
81290+ if (ip->mode & GR_INVERT)
81291+ return 2; // specifically denied
81292+ else
81293+ return 1; // allowed
81294+ }
81295+
81296+ return 0; // not specifically allowed, may continue parsing
81297+}
81298+
81299+static int
81300+gr_search_connectbind(const int full_mode, struct sock *sk,
81301+ struct sockaddr_in *addr, const int type)
81302+{
81303+ char iface[IFNAMSIZ] = {0};
81304+ struct acl_subject_label *curr;
81305+ struct acl_ip_label *ip;
81306+ struct inet_sock *isk;
81307+ struct net_device *dev;
81308+ struct in_device *idev;
81309+ unsigned long i;
81310+ int ret;
81311+ int mode = full_mode & (GR_BIND | GR_CONNECT);
81312+ __u32 ip_addr = 0;
81313+ __u32 our_addr;
81314+ __u32 our_netmask;
81315+ char *p;
81316+ __u16 ip_port = 0;
81317+ const struct cred *cred = current_cred();
81318+
81319+ if (unlikely(!gr_acl_is_enabled() || sk->sk_family != PF_INET))
81320+ return 0;
81321+
81322+ curr = current->acl;
81323+ isk = inet_sk(sk);
81324+
81325+ /* INADDR_ANY overriding for binds, inaddr_any_override is already in network order */
81326+ if ((full_mode & GR_BINDOVERRIDE) && addr->sin_addr.s_addr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0)
81327+ addr->sin_addr.s_addr = curr->inaddr_any_override;
81328+ if ((full_mode & GR_CONNECT) && isk->inet_saddr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0) {
81329+ struct sockaddr_in saddr;
81330+ int err;
81331+
81332+ saddr.sin_family = AF_INET;
81333+ saddr.sin_addr.s_addr = curr->inaddr_any_override;
81334+ saddr.sin_port = isk->inet_sport;
81335+
81336+ err = security_socket_bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
81337+ if (err)
81338+ return err;
81339+
81340+ err = sk->sk_socket->ops->bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
81341+ if (err)
81342+ return err;
81343+ }
81344+
81345+ if (!curr->ips)
81346+ return 0;
81347+
81348+ ip_addr = addr->sin_addr.s_addr;
81349+ ip_port = ntohs(addr->sin_port);
81350+
81351+ if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
81352+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
81353+ current->role->roletype, GR_GLOBAL_UID(cred->uid),
81354+ GR_GLOBAL_GID(cred->gid), current->exec_file ?
81355+ gr_to_filename(current->exec_file->f_path.dentry,
81356+ current->exec_file->f_path.mnt) :
81357+ curr->filename, curr->filename,
81358+ &ip_addr, ip_port, type,
81359+ sk->sk_protocol, mode, &current->signal->saved_ip);
81360+ return 0;
81361+ }
81362+
81363+ for (i = 0; i < curr->ip_num; i++) {
81364+ ip = *(curr->ips + i);
81365+ if (ip->iface != NULL) {
81366+ strncpy(iface, ip->iface, IFNAMSIZ - 1);
81367+ p = strchr(iface, ':');
81368+ if (p != NULL)
81369+ *p = '\0';
81370+ dev = dev_get_by_name(sock_net(sk), iface);
81371+ if (dev == NULL)
81372+ continue;
81373+ idev = in_dev_get(dev);
81374+ if (idev == NULL) {
81375+ dev_put(dev);
81376+ continue;
81377+ }
81378+ rcu_read_lock();
81379+ for_ifa(idev) {
81380+ if (!strcmp(ip->iface, ifa->ifa_label)) {
81381+ our_addr = ifa->ifa_address;
81382+ our_netmask = 0xffffffff;
81383+ ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
81384+ if (ret == 1) {
81385+ rcu_read_unlock();
81386+ in_dev_put(idev);
81387+ dev_put(dev);
81388+ return 0;
81389+ } else if (ret == 2) {
81390+ rcu_read_unlock();
81391+ in_dev_put(idev);
81392+ dev_put(dev);
81393+ goto denied;
81394+ }
81395+ }
81396+ } endfor_ifa(idev);
81397+ rcu_read_unlock();
81398+ in_dev_put(idev);
81399+ dev_put(dev);
81400+ } else {
81401+ our_addr = ip->addr;
81402+ our_netmask = ip->netmask;
81403+ ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
81404+ if (ret == 1)
81405+ return 0;
81406+ else if (ret == 2)
81407+ goto denied;
81408+ }
81409+ }
81410+
81411+denied:
81412+ if (mode == GR_BIND)
81413+ gr_log_int5_str2(GR_DONT_AUDIT, GR_BIND_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
81414+ else if (mode == GR_CONNECT)
81415+ gr_log_int5_str2(GR_DONT_AUDIT, GR_CONNECT_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
81416+
81417+ return -EACCES;
81418+}
81419+
81420+int
81421+gr_search_connect(struct socket *sock, struct sockaddr_in *addr)
81422+{
81423+ /* always allow disconnection of dgram sockets with connect */
81424+ if (addr->sin_family == AF_UNSPEC)
81425+ return 0;
81426+ return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sock->sk, addr, sock->type);
81427+}
81428+
81429+int
81430+gr_search_bind(struct socket *sock, struct sockaddr_in *addr)
81431+{
81432+ return gr_search_connectbind(GR_BIND | GR_BINDOVERRIDE, sock->sk, addr, sock->type);
81433+}
81434+
81435+int gr_search_listen(struct socket *sock)
81436+{
81437+ struct sock *sk = sock->sk;
81438+ struct sockaddr_in addr;
81439+
81440+ addr.sin_addr.s_addr = inet_sk(sk)->inet_saddr;
81441+ addr.sin_port = inet_sk(sk)->inet_sport;
81442+
81443+ return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
81444+}
81445+
81446+int gr_search_accept(struct socket *sock)
81447+{
81448+ struct sock *sk = sock->sk;
81449+ struct sockaddr_in addr;
81450+
81451+ addr.sin_addr.s_addr = inet_sk(sk)->inet_saddr;
81452+ addr.sin_port = inet_sk(sk)->inet_sport;
81453+
81454+ return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
81455+}
81456+
81457+int
81458+gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr)
81459+{
81460+ if (addr)
81461+ return gr_search_connectbind(GR_CONNECT, sk, addr, SOCK_DGRAM);
81462+ else {
81463+ struct sockaddr_in sin;
81464+ const struct inet_sock *inet = inet_sk(sk);
81465+
81466+ sin.sin_addr.s_addr = inet->inet_daddr;
81467+ sin.sin_port = inet->inet_dport;
81468+
81469+ return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
81470+ }
81471+}
81472+
81473+int
81474+gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb)
81475+{
81476+ struct sockaddr_in sin;
81477+
81478+ if (unlikely(skb->len < sizeof (struct udphdr)))
81479+ return 0; // skip this packet
81480+
81481+ sin.sin_addr.s_addr = ip_hdr(skb)->saddr;
81482+ sin.sin_port = udp_hdr(skb)->source;
81483+
81484+ return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
81485+}
81486diff --git a/grsecurity/gracl_learn.c b/grsecurity/gracl_learn.c
81487new file mode 100644
81488index 0000000..25f54ef
81489--- /dev/null
81490+++ b/grsecurity/gracl_learn.c
81491@@ -0,0 +1,207 @@
81492+#include <linux/kernel.h>
81493+#include <linux/mm.h>
81494+#include <linux/sched.h>
81495+#include <linux/poll.h>
81496+#include <linux/string.h>
81497+#include <linux/file.h>
81498+#include <linux/types.h>
81499+#include <linux/vmalloc.h>
81500+#include <linux/grinternal.h>
81501+
81502+extern ssize_t write_grsec_handler(struct file * file, const char __user * buf,
81503+ size_t count, loff_t *ppos);
81504+extern int gr_acl_is_enabled(void);
81505+
81506+static DECLARE_WAIT_QUEUE_HEAD(learn_wait);
81507+static int gr_learn_attached;
81508+
81509+/* use a 512k buffer */
81510+#define LEARN_BUFFER_SIZE (512 * 1024)
81511+
81512+static DEFINE_SPINLOCK(gr_learn_lock);
81513+static DEFINE_MUTEX(gr_learn_user_mutex);
81514+
81515+/* we need to maintain two buffers, so that the kernel context of grlearn
81516+ uses a semaphore around the userspace copying, and the other kernel contexts
81517+ use a spinlock when copying into the buffer, since they cannot sleep
81518+*/
81519+static char *learn_buffer;
81520+static char *learn_buffer_user;
81521+static int learn_buffer_len;
81522+static int learn_buffer_user_len;
81523+
81524+static ssize_t
81525+read_learn(struct file *file, char __user * buf, size_t count, loff_t * ppos)
81526+{
81527+ DECLARE_WAITQUEUE(wait, current);
81528+ ssize_t retval = 0;
81529+
81530+ add_wait_queue(&learn_wait, &wait);
81531+ set_current_state(TASK_INTERRUPTIBLE);
81532+ do {
81533+ mutex_lock(&gr_learn_user_mutex);
81534+ spin_lock(&gr_learn_lock);
81535+ if (learn_buffer_len)
81536+ break;
81537+ spin_unlock(&gr_learn_lock);
81538+ mutex_unlock(&gr_learn_user_mutex);
81539+ if (file->f_flags & O_NONBLOCK) {
81540+ retval = -EAGAIN;
81541+ goto out;
81542+ }
81543+ if (signal_pending(current)) {
81544+ retval = -ERESTARTSYS;
81545+ goto out;
81546+ }
81547+
81548+ schedule();
81549+ } while (1);
81550+
81551+ memcpy(learn_buffer_user, learn_buffer, learn_buffer_len);
81552+ learn_buffer_user_len = learn_buffer_len;
81553+ retval = learn_buffer_len;
81554+ learn_buffer_len = 0;
81555+
81556+ spin_unlock(&gr_learn_lock);
81557+
81558+ if (copy_to_user(buf, learn_buffer_user, learn_buffer_user_len))
81559+ retval = -EFAULT;
81560+
81561+ mutex_unlock(&gr_learn_user_mutex);
81562+out:
81563+ set_current_state(TASK_RUNNING);
81564+ remove_wait_queue(&learn_wait, &wait);
81565+ return retval;
81566+}
81567+
81568+static unsigned int
81569+poll_learn(struct file * file, poll_table * wait)
81570+{
81571+ poll_wait(file, &learn_wait, wait);
81572+
81573+ if (learn_buffer_len)
81574+ return (POLLIN | POLLRDNORM);
81575+
81576+ return 0;
81577+}
81578+
81579+void
81580+gr_clear_learn_entries(void)
81581+{
81582+ char *tmp;
81583+
81584+ mutex_lock(&gr_learn_user_mutex);
81585+ spin_lock(&gr_learn_lock);
81586+ tmp = learn_buffer;
81587+ learn_buffer = NULL;
81588+ spin_unlock(&gr_learn_lock);
81589+ if (tmp)
81590+ vfree(tmp);
81591+ if (learn_buffer_user != NULL) {
81592+ vfree(learn_buffer_user);
81593+ learn_buffer_user = NULL;
81594+ }
81595+ learn_buffer_len = 0;
81596+ mutex_unlock(&gr_learn_user_mutex);
81597+
81598+ return;
81599+}
81600+
81601+void
81602+gr_add_learn_entry(const char *fmt, ...)
81603+{
81604+ va_list args;
81605+ unsigned int len;
81606+
81607+ if (!gr_learn_attached)
81608+ return;
81609+
81610+ spin_lock(&gr_learn_lock);
81611+
81612+ /* leave a gap at the end so we know when it's "full" but don't have to
81613+ compute the exact length of the string we're trying to append
81614+ */
81615+ if (learn_buffer_len > LEARN_BUFFER_SIZE - 16384) {
81616+ spin_unlock(&gr_learn_lock);
81617+ wake_up_interruptible(&learn_wait);
81618+ return;
81619+ }
81620+ if (learn_buffer == NULL) {
81621+ spin_unlock(&gr_learn_lock);
81622+ return;
81623+ }
81624+
81625+ va_start(args, fmt);
81626+ len = vsnprintf(learn_buffer + learn_buffer_len, LEARN_BUFFER_SIZE - learn_buffer_len, fmt, args);
81627+ va_end(args);
81628+
81629+ learn_buffer_len += len + 1;
81630+
81631+ spin_unlock(&gr_learn_lock);
81632+ wake_up_interruptible(&learn_wait);
81633+
81634+ return;
81635+}
81636+
81637+static int
81638+open_learn(struct inode *inode, struct file *file)
81639+{
81640+ if (file->f_mode & FMODE_READ && gr_learn_attached)
81641+ return -EBUSY;
81642+ if (file->f_mode & FMODE_READ) {
81643+ int retval = 0;
81644+ mutex_lock(&gr_learn_user_mutex);
81645+ if (learn_buffer == NULL)
81646+ learn_buffer = vmalloc(LEARN_BUFFER_SIZE);
81647+ if (learn_buffer_user == NULL)
81648+ learn_buffer_user = vmalloc(LEARN_BUFFER_SIZE);
81649+ if (learn_buffer == NULL) {
81650+ retval = -ENOMEM;
81651+ goto out_error;
81652+ }
81653+ if (learn_buffer_user == NULL) {
81654+ retval = -ENOMEM;
81655+ goto out_error;
81656+ }
81657+ learn_buffer_len = 0;
81658+ learn_buffer_user_len = 0;
81659+ gr_learn_attached = 1;
81660+out_error:
81661+ mutex_unlock(&gr_learn_user_mutex);
81662+ return retval;
81663+ }
81664+ return 0;
81665+}
81666+
81667+static int
81668+close_learn(struct inode *inode, struct file *file)
81669+{
81670+ if (file->f_mode & FMODE_READ) {
81671+ char *tmp = NULL;
81672+ mutex_lock(&gr_learn_user_mutex);
81673+ spin_lock(&gr_learn_lock);
81674+ tmp = learn_buffer;
81675+ learn_buffer = NULL;
81676+ spin_unlock(&gr_learn_lock);
81677+ if (tmp)
81678+ vfree(tmp);
81679+ if (learn_buffer_user != NULL) {
81680+ vfree(learn_buffer_user);
81681+ learn_buffer_user = NULL;
81682+ }
81683+ learn_buffer_len = 0;
81684+ learn_buffer_user_len = 0;
81685+ gr_learn_attached = 0;
81686+ mutex_unlock(&gr_learn_user_mutex);
81687+ }
81688+
81689+ return 0;
81690+}
81691+
81692+const struct file_operations grsec_fops = {
81693+ .read = read_learn,
81694+ .write = write_grsec_handler,
81695+ .open = open_learn,
81696+ .release = close_learn,
81697+ .poll = poll_learn,
81698+};
81699diff --git a/grsecurity/gracl_policy.c b/grsecurity/gracl_policy.c
81700new file mode 100644
81701index 0000000..fd26052
81702--- /dev/null
81703+++ b/grsecurity/gracl_policy.c
81704@@ -0,0 +1,1781 @@
81705+#include <linux/kernel.h>
81706+#include <linux/module.h>
81707+#include <linux/sched.h>
81708+#include <linux/mm.h>
81709+#include <linux/file.h>
81710+#include <linux/fs.h>
81711+#include <linux/namei.h>
81712+#include <linux/mount.h>
81713+#include <linux/tty.h>
81714+#include <linux/proc_fs.h>
81715+#include <linux/lglock.h>
81716+#include <linux/slab.h>
81717+#include <linux/vmalloc.h>
81718+#include <linux/types.h>
81719+#include <linux/sysctl.h>
81720+#include <linux/netdevice.h>
81721+#include <linux/ptrace.h>
81722+#include <linux/gracl.h>
81723+#include <linux/gralloc.h>
81724+#include <linux/security.h>
81725+#include <linux/grinternal.h>
81726+#include <linux/pid_namespace.h>
81727+#include <linux/stop_machine.h>
81728+#include <linux/fdtable.h>
81729+#include <linux/percpu.h>
81730+#include <linux/lglock.h>
81731+#include <linux/hugetlb.h>
81732+#include <linux/posix-timers.h>
81733+#include "../fs/mount.h"
81734+
81735+#include <asm/uaccess.h>
81736+#include <asm/errno.h>
81737+#include <asm/mman.h>
81738+
81739+extern struct gr_policy_state *polstate;
81740+
81741+#define FOR_EACH_ROLE_START(role) \
81742+ role = polstate->role_list; \
81743+ while (role) {
81744+
81745+#define FOR_EACH_ROLE_END(role) \
81746+ role = role->prev; \
81747+ }
81748+
81749+struct path gr_real_root;
81750+
81751+extern struct gr_alloc_state *current_alloc_state;
81752+
81753+u16 acl_sp_role_value;
81754+
81755+static DEFINE_MUTEX(gr_dev_mutex);
81756+
81757+extern int chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum);
81758+extern void gr_clear_learn_entries(void);
81759+
81760+struct gr_arg *gr_usermode __read_only;
81761+unsigned char *gr_system_salt __read_only;
81762+unsigned char *gr_system_sum __read_only;
81763+
81764+static unsigned int gr_auth_attempts = 0;
81765+static unsigned long gr_auth_expires = 0UL;
81766+
81767+struct acl_object_label *fakefs_obj_rw;
81768+struct acl_object_label *fakefs_obj_rwx;
81769+
81770+extern int gr_init_uidset(void);
81771+extern void gr_free_uidset(void);
81772+extern void gr_remove_uid(uid_t uid);
81773+extern int gr_find_uid(uid_t uid);
81774+
81775+extern struct acl_subject_label *__gr_get_subject_for_task(const struct gr_policy_state *state, struct task_struct *task, const char *filename, int fallback);
81776+extern void __gr_apply_subject_to_task(struct gr_policy_state *state, struct task_struct *task, struct acl_subject_label *subj);
81777+extern int gr_streq(const char *a, const char *b, const unsigned int lena, const unsigned int lenb);
81778+extern void __insert_inodev_entry(const struct gr_policy_state *state, struct inodev_entry *entry);
81779+extern struct acl_role_label *__lookup_acl_role_label(const struct gr_policy_state *state, const struct task_struct *task, const uid_t uid, const gid_t gid);
81780+extern void insert_acl_obj_label(struct acl_object_label *obj, struct acl_subject_label *subj);
81781+extern void insert_acl_subj_label(struct acl_subject_label *obj, struct acl_role_label *role);
81782+extern struct name_entry * __lookup_name_entry(const struct gr_policy_state *state, const char *name);
81783+extern char *gr_to_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt);
81784+extern struct acl_subject_label *lookup_acl_subj_label(const u64 ino, const dev_t dev, const struct acl_role_label *role);
81785+extern struct acl_subject_label *lookup_acl_subj_label_deleted(const u64 ino, const dev_t dev, const struct acl_role_label *role);
81786+extern void assign_special_role(const char *rolename);
81787+extern struct acl_subject_label *chk_subj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt, const struct acl_role_label *role);
81788+extern int gr_rbac_disable(void *unused);
81789+extern void gr_enable_rbac_system(void);
81790+
81791+static int copy_acl_object_label_normal(struct acl_object_label *obj, const struct acl_object_label *userp)
81792+{
81793+ if (copy_from_user(obj, userp, sizeof(struct acl_object_label)))
81794+ return -EFAULT;
81795+
81796+ return 0;
81797+}
81798+
81799+static int copy_acl_ip_label_normal(struct acl_ip_label *ip, const struct acl_ip_label *userp)
81800+{
81801+ if (copy_from_user(ip, userp, sizeof(struct acl_ip_label)))
81802+ return -EFAULT;
81803+
81804+ return 0;
81805+}
81806+
81807+static int copy_acl_subject_label_normal(struct acl_subject_label *subj, const struct acl_subject_label *userp)
81808+{
81809+ if (copy_from_user(subj, userp, sizeof(struct acl_subject_label)))
81810+ return -EFAULT;
81811+
81812+ return 0;
81813+}
81814+
81815+static int copy_acl_role_label_normal(struct acl_role_label *role, const struct acl_role_label *userp)
81816+{
81817+ if (copy_from_user(role, userp, sizeof(struct acl_role_label)))
81818+ return -EFAULT;
81819+
81820+ return 0;
81821+}
81822+
81823+static int copy_role_allowed_ip_normal(struct role_allowed_ip *roleip, const struct role_allowed_ip *userp)
81824+{
81825+ if (copy_from_user(roleip, userp, sizeof(struct role_allowed_ip)))
81826+ return -EFAULT;
81827+
81828+ return 0;
81829+}
81830+
81831+static int copy_sprole_pw_normal(struct sprole_pw *pw, unsigned long idx, const struct sprole_pw *userp)
81832+{
81833+ if (copy_from_user(pw, userp + idx, sizeof(struct sprole_pw)))
81834+ return -EFAULT;
81835+
81836+ return 0;
81837+}
81838+
81839+static int copy_gr_hash_struct_normal(struct gr_hash_struct *hash, const struct gr_hash_struct *userp)
81840+{
81841+ if (copy_from_user(hash, userp, sizeof(struct gr_hash_struct)))
81842+ return -EFAULT;
81843+
81844+ return 0;
81845+}
81846+
81847+static int copy_role_transition_normal(struct role_transition *trans, const struct role_transition *userp)
81848+{
81849+ if (copy_from_user(trans, userp, sizeof(struct role_transition)))
81850+ return -EFAULT;
81851+
81852+ return 0;
81853+}
81854+
81855+int copy_pointer_from_array_normal(void *ptr, unsigned long idx, const void *userp)
81856+{
81857+ if (copy_from_user(ptr, userp + (idx * sizeof(void *)), sizeof(void *)))
81858+ return -EFAULT;
81859+
81860+ return 0;
81861+}
81862+
81863+static int copy_gr_arg_wrapper_normal(const char __user *buf, struct gr_arg_wrapper *uwrap)
81864+{
81865+ if (copy_from_user(uwrap, buf, sizeof (struct gr_arg_wrapper)))
81866+ return -EFAULT;
81867+
81868+ if ((uwrap->version != GRSECURITY_VERSION) ||
81869+ (uwrap->size != sizeof(struct gr_arg)))
81870+ return -EINVAL;
81871+
81872+ return 0;
81873+}
81874+
81875+static int copy_gr_arg_normal(const struct gr_arg __user *buf, struct gr_arg *arg)
81876+{
81877+ if (copy_from_user(arg, buf, sizeof (struct gr_arg)))
81878+ return -EFAULT;
81879+
81880+ return 0;
81881+}
81882+
81883+static size_t get_gr_arg_wrapper_size_normal(void)
81884+{
81885+ return sizeof(struct gr_arg_wrapper);
81886+}
81887+
81888+#ifdef CONFIG_COMPAT
81889+extern int copy_gr_arg_wrapper_compat(const char *buf, struct gr_arg_wrapper *uwrap);
81890+extern int copy_gr_arg_compat(const struct gr_arg __user *buf, struct gr_arg *arg);
81891+extern int copy_acl_object_label_compat(struct acl_object_label *obj, const struct acl_object_label *userp);
81892+extern int copy_acl_subject_label_compat(struct acl_subject_label *subj, const struct acl_subject_label *userp);
81893+extern int copy_acl_role_label_compat(struct acl_role_label *role, const struct acl_role_label *userp);
81894+extern int copy_role_allowed_ip_compat(struct role_allowed_ip *roleip, const struct role_allowed_ip *userp);
81895+extern int copy_role_transition_compat(struct role_transition *trans, const struct role_transition *userp);
81896+extern int copy_gr_hash_struct_compat(struct gr_hash_struct *hash, const struct gr_hash_struct *userp);
81897+extern int copy_pointer_from_array_compat(void *ptr, unsigned long idx, const void *userp);
81898+extern int copy_acl_ip_label_compat(struct acl_ip_label *ip, const struct acl_ip_label *userp);
81899+extern int copy_sprole_pw_compat(struct sprole_pw *pw, unsigned long idx, const struct sprole_pw *userp);
81900+extern size_t get_gr_arg_wrapper_size_compat(void);
81901+
81902+int (* copy_gr_arg_wrapper)(const char *buf, struct gr_arg_wrapper *uwrap) __read_only;
81903+int (* copy_gr_arg)(const struct gr_arg *buf, struct gr_arg *arg) __read_only;
81904+int (* copy_acl_object_label)(struct acl_object_label *obj, const struct acl_object_label *userp) __read_only;
81905+int (* copy_acl_subject_label)(struct acl_subject_label *subj, const struct acl_subject_label *userp) __read_only;
81906+int (* copy_acl_role_label)(struct acl_role_label *role, const struct acl_role_label *userp) __read_only;
81907+int (* copy_acl_ip_label)(struct acl_ip_label *ip, const struct acl_ip_label *userp) __read_only;
81908+int (* copy_pointer_from_array)(void *ptr, unsigned long idx, const void *userp) __read_only;
81909+int (* copy_sprole_pw)(struct sprole_pw *pw, unsigned long idx, const struct sprole_pw *userp) __read_only;
81910+int (* copy_gr_hash_struct)(struct gr_hash_struct *hash, const struct gr_hash_struct *userp) __read_only;
81911+int (* copy_role_transition)(struct role_transition *trans, const struct role_transition *userp) __read_only;
81912+int (* copy_role_allowed_ip)(struct role_allowed_ip *roleip, const struct role_allowed_ip *userp) __read_only;
81913+size_t (* get_gr_arg_wrapper_size)(void) __read_only;
81914+
81915+#else
81916+#define copy_gr_arg_wrapper copy_gr_arg_wrapper_normal
81917+#define copy_gr_arg copy_gr_arg_normal
81918+#define copy_gr_hash_struct copy_gr_hash_struct_normal
81919+#define copy_acl_object_label copy_acl_object_label_normal
81920+#define copy_acl_subject_label copy_acl_subject_label_normal
81921+#define copy_acl_role_label copy_acl_role_label_normal
81922+#define copy_acl_ip_label copy_acl_ip_label_normal
81923+#define copy_pointer_from_array copy_pointer_from_array_normal
81924+#define copy_sprole_pw copy_sprole_pw_normal
81925+#define copy_role_transition copy_role_transition_normal
81926+#define copy_role_allowed_ip copy_role_allowed_ip_normal
81927+#define get_gr_arg_wrapper_size get_gr_arg_wrapper_size_normal
81928+#endif
81929+
81930+static struct acl_subject_label *
81931+lookup_subject_map(const struct acl_subject_label *userp)
81932+{
81933+ unsigned int index = gr_shash(userp, polstate->subj_map_set.s_size);
81934+ struct subject_map *match;
81935+
81936+ match = polstate->subj_map_set.s_hash[index];
81937+
81938+ while (match && match->user != userp)
81939+ match = match->next;
81940+
81941+ if (match != NULL)
81942+ return match->kernel;
81943+ else
81944+ return NULL;
81945+}
81946+
81947+static void
81948+insert_subj_map_entry(struct subject_map *subjmap)
81949+{
81950+ unsigned int index = gr_shash(subjmap->user, polstate->subj_map_set.s_size);
81951+ struct subject_map **curr;
81952+
81953+ subjmap->prev = NULL;
81954+
81955+ curr = &polstate->subj_map_set.s_hash[index];
81956+ if (*curr != NULL)
81957+ (*curr)->prev = subjmap;
81958+
81959+ subjmap->next = *curr;
81960+ *curr = subjmap;
81961+
81962+ return;
81963+}
81964+
81965+static void
81966+__insert_acl_role_label(struct acl_role_label *role, uid_t uidgid)
81967+{
81968+ unsigned int index =
81969+ gr_rhash(uidgid, role->roletype & (GR_ROLE_USER | GR_ROLE_GROUP), polstate->acl_role_set.r_size);
81970+ struct acl_role_label **curr;
81971+ struct acl_role_label *tmp, *tmp2;
81972+
81973+ curr = &polstate->acl_role_set.r_hash[index];
81974+
81975+ /* simple case, slot is empty, just set it to our role */
81976+ if (*curr == NULL) {
81977+ *curr = role;
81978+ } else {
81979+ /* example:
81980+ 1 -> 2 -> 3 (adding 2 -> 3 to here)
81981+ 2 -> 3
81982+ */
81983+ /* first check to see if we can already be reached via this slot */
81984+ tmp = *curr;
81985+ while (tmp && tmp != role)
81986+ tmp = tmp->next;
81987+ if (tmp == role) {
81988+ /* we don't need to add ourselves to this slot's chain */
81989+ return;
81990+ }
81991+ /* we need to add ourselves to this chain, two cases */
81992+ if (role->next == NULL) {
81993+ /* simple case, append the current chain to our role */
81994+ role->next = *curr;
81995+ *curr = role;
81996+ } else {
81997+ /* 1 -> 2 -> 3 -> 4
81998+ 2 -> 3 -> 4
81999+ 3 -> 4 (adding 1 -> 2 -> 3 -> 4 to here)
82000+ */
82001+ /* trickier case: walk our role's chain until we find
82002+ the role for the start of the current slot's chain */
82003+ tmp = role;
82004+ tmp2 = *curr;
82005+ while (tmp->next && tmp->next != tmp2)
82006+ tmp = tmp->next;
82007+ if (tmp->next == tmp2) {
82008+ /* from example above, we found 3, so just
82009+ replace this slot's chain with ours */
82010+ *curr = role;
82011+ } else {
82012+ /* we didn't find a subset of our role's chain
82013+ in the current slot's chain, so append their
82014+ chain to ours, and set us as the first role in
82015+ the slot's chain
82016+
82017+ we could fold this case with the case above,
82018+ but making it explicit for clarity
82019+ */
82020+ tmp->next = tmp2;
82021+ *curr = role;
82022+ }
82023+ }
82024+ }
82025+
82026+ return;
82027+}
82028+
82029+static void
82030+insert_acl_role_label(struct acl_role_label *role)
82031+{
82032+ int i;
82033+
82034+ if (polstate->role_list == NULL) {
82035+ polstate->role_list = role;
82036+ role->prev = NULL;
82037+ } else {
82038+ role->prev = polstate->role_list;
82039+ polstate->role_list = role;
82040+ }
82041+
82042+ /* used for hash chains */
82043+ role->next = NULL;
82044+
82045+ if (role->roletype & GR_ROLE_DOMAIN) {
82046+ for (i = 0; i < role->domain_child_num; i++)
82047+ __insert_acl_role_label(role, role->domain_children[i]);
82048+ } else
82049+ __insert_acl_role_label(role, role->uidgid);
82050+}
82051+
82052+static int
82053+insert_name_entry(char *name, const u64 inode, const dev_t device, __u8 deleted)
82054+{
82055+ struct name_entry **curr, *nentry;
82056+ struct inodev_entry *ientry;
82057+ unsigned int len = strlen(name);
82058+ unsigned int key = full_name_hash(name, len);
82059+ unsigned int index = key % polstate->name_set.n_size;
82060+
82061+ curr = &polstate->name_set.n_hash[index];
82062+
82063+ while (*curr && ((*curr)->key != key || !gr_streq((*curr)->name, name, (*curr)->len, len)))
82064+ curr = &((*curr)->next);
82065+
82066+ if (*curr != NULL)
82067+ return 1;
82068+
82069+ nentry = acl_alloc(sizeof (struct name_entry));
82070+ if (nentry == NULL)
82071+ return 0;
82072+ ientry = acl_alloc(sizeof (struct inodev_entry));
82073+ if (ientry == NULL)
82074+ return 0;
82075+ ientry->nentry = nentry;
82076+
82077+ nentry->key = key;
82078+ nentry->name = name;
82079+ nentry->inode = inode;
82080+ nentry->device = device;
82081+ nentry->len = len;
82082+ nentry->deleted = deleted;
82083+
82084+ nentry->prev = NULL;
82085+ curr = &polstate->name_set.n_hash[index];
82086+ if (*curr != NULL)
82087+ (*curr)->prev = nentry;
82088+ nentry->next = *curr;
82089+ *curr = nentry;
82090+
82091+ /* insert us into the table searchable by inode/dev */
82092+ __insert_inodev_entry(polstate, ientry);
82093+
82094+ return 1;
82095+}
82096+
82097+/* allocating chained hash tables, so optimal size is where lambda ~ 1 */
82098+
82099+static void *
82100+create_table(__u32 * len, int elementsize)
82101+{
82102+ unsigned int table_sizes[] = {
82103+ 7, 13, 31, 61, 127, 251, 509, 1021, 2039, 4093, 8191, 16381,
82104+ 32749, 65521, 131071, 262139, 524287, 1048573, 2097143,
82105+ 4194301, 8388593, 16777213, 33554393, 67108859
82106+ };
82107+ void *newtable = NULL;
82108+ unsigned int pwr = 0;
82109+
82110+ while ((pwr < ((sizeof (table_sizes) / sizeof (table_sizes[0])) - 1)) &&
82111+ table_sizes[pwr] <= *len)
82112+ pwr++;
82113+
82114+ if (table_sizes[pwr] <= *len || (table_sizes[pwr] > ULONG_MAX / elementsize))
82115+ return newtable;
82116+
82117+ if ((table_sizes[pwr] * elementsize) <= PAGE_SIZE)
82118+ newtable =
82119+ kmalloc(table_sizes[pwr] * elementsize, GFP_KERNEL);
82120+ else
82121+ newtable = vmalloc(table_sizes[pwr] * elementsize);
82122+
82123+ *len = table_sizes[pwr];
82124+
82125+ return newtable;
82126+}
82127+
82128+static int
82129+init_variables(const struct gr_arg *arg, bool reload)
82130+{
82131+ struct task_struct *reaper = init_pid_ns.child_reaper;
82132+ unsigned int stacksize;
82133+
82134+ polstate->subj_map_set.s_size = arg->role_db.num_subjects;
82135+ polstate->acl_role_set.r_size = arg->role_db.num_roles + arg->role_db.num_domain_children;
82136+ polstate->name_set.n_size = arg->role_db.num_objects;
82137+ polstate->inodev_set.i_size = arg->role_db.num_objects;
82138+
82139+ if (!polstate->subj_map_set.s_size || !polstate->acl_role_set.r_size ||
82140+ !polstate->name_set.n_size || !polstate->inodev_set.i_size)
82141+ return 1;
82142+
82143+ if (!reload) {
82144+ if (!gr_init_uidset())
82145+ return 1;
82146+ }
82147+
82148+ /* set up the stack that holds allocation info */
82149+
82150+ stacksize = arg->role_db.num_pointers + 5;
82151+
82152+ if (!acl_alloc_stack_init(stacksize))
82153+ return 1;
82154+
82155+ if (!reload) {
82156+ /* grab reference for the real root dentry and vfsmount */
82157+ get_fs_root(reaper->fs, &gr_real_root);
82158+
82159+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
82160+ printk(KERN_ALERT "Obtained real root device=%d, inode=%lu\n", __get_dev(gr_real_root.dentry), gr_real_root.dentry->d_inode->i_ino);
82161+#endif
82162+
82163+ fakefs_obj_rw = kzalloc(sizeof(struct acl_object_label), GFP_KERNEL);
82164+ if (fakefs_obj_rw == NULL)
82165+ return 1;
82166+ fakefs_obj_rw->mode = GR_FIND | GR_READ | GR_WRITE;
82167+
82168+ fakefs_obj_rwx = kzalloc(sizeof(struct acl_object_label), GFP_KERNEL);
82169+ if (fakefs_obj_rwx == NULL)
82170+ return 1;
82171+ fakefs_obj_rwx->mode = GR_FIND | GR_READ | GR_WRITE | GR_EXEC;
82172+ }
82173+
82174+ polstate->subj_map_set.s_hash =
82175+ (struct subject_map **) create_table(&polstate->subj_map_set.s_size, sizeof(void *));
82176+ polstate->acl_role_set.r_hash =
82177+ (struct acl_role_label **) create_table(&polstate->acl_role_set.r_size, sizeof(void *));
82178+ polstate->name_set.n_hash = (struct name_entry **) create_table(&polstate->name_set.n_size, sizeof(void *));
82179+ polstate->inodev_set.i_hash =
82180+ (struct inodev_entry **) create_table(&polstate->inodev_set.i_size, sizeof(void *));
82181+
82182+ if (!polstate->subj_map_set.s_hash || !polstate->acl_role_set.r_hash ||
82183+ !polstate->name_set.n_hash || !polstate->inodev_set.i_hash)
82184+ return 1;
82185+
82186+ memset(polstate->subj_map_set.s_hash, 0,
82187+ sizeof(struct subject_map *) * polstate->subj_map_set.s_size);
82188+ memset(polstate->acl_role_set.r_hash, 0,
82189+ sizeof (struct acl_role_label *) * polstate->acl_role_set.r_size);
82190+ memset(polstate->name_set.n_hash, 0,
82191+ sizeof (struct name_entry *) * polstate->name_set.n_size);
82192+ memset(polstate->inodev_set.i_hash, 0,
82193+ sizeof (struct inodev_entry *) * polstate->inodev_set.i_size);
82194+
82195+ return 0;
82196+}
82197+
82198+/* free information not needed after startup
82199+ currently contains user->kernel pointer mappings for subjects
82200+*/
82201+
82202+static void
82203+free_init_variables(void)
82204+{
82205+ __u32 i;
82206+
82207+ if (polstate->subj_map_set.s_hash) {
82208+ for (i = 0; i < polstate->subj_map_set.s_size; i++) {
82209+ if (polstate->subj_map_set.s_hash[i]) {
82210+ kfree(polstate->subj_map_set.s_hash[i]);
82211+ polstate->subj_map_set.s_hash[i] = NULL;
82212+ }
82213+ }
82214+
82215+ if ((polstate->subj_map_set.s_size * sizeof (struct subject_map *)) <=
82216+ PAGE_SIZE)
82217+ kfree(polstate->subj_map_set.s_hash);
82218+ else
82219+ vfree(polstate->subj_map_set.s_hash);
82220+ }
82221+
82222+ return;
82223+}
82224+
82225+static void
82226+free_variables(bool reload)
82227+{
82228+ struct acl_subject_label *s;
82229+ struct acl_role_label *r;
82230+ struct task_struct *task, *task2;
82231+ unsigned int x;
82232+
82233+ if (!reload) {
82234+ gr_clear_learn_entries();
82235+
82236+ read_lock(&tasklist_lock);
82237+ do_each_thread(task2, task) {
82238+ task->acl_sp_role = 0;
82239+ task->acl_role_id = 0;
82240+ task->inherited = 0;
82241+ task->acl = NULL;
82242+ task->role = NULL;
82243+ } while_each_thread(task2, task);
82244+ read_unlock(&tasklist_lock);
82245+
82246+ kfree(fakefs_obj_rw);
82247+ fakefs_obj_rw = NULL;
82248+ kfree(fakefs_obj_rwx);
82249+ fakefs_obj_rwx = NULL;
82250+
82251+ /* release the reference to the real root dentry and vfsmount */
82252+ path_put(&gr_real_root);
82253+ memset(&gr_real_root, 0, sizeof(gr_real_root));
82254+ }
82255+
82256+ /* free all object hash tables */
82257+
82258+ FOR_EACH_ROLE_START(r)
82259+ if (r->subj_hash == NULL)
82260+ goto next_role;
82261+ FOR_EACH_SUBJECT_START(r, s, x)
82262+ if (s->obj_hash == NULL)
82263+ break;
82264+ if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
82265+ kfree(s->obj_hash);
82266+ else
82267+ vfree(s->obj_hash);
82268+ FOR_EACH_SUBJECT_END(s, x)
82269+ FOR_EACH_NESTED_SUBJECT_START(r, s)
82270+ if (s->obj_hash == NULL)
82271+ break;
82272+ if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
82273+ kfree(s->obj_hash);
82274+ else
82275+ vfree(s->obj_hash);
82276+ FOR_EACH_NESTED_SUBJECT_END(s)
82277+ if ((r->subj_hash_size * sizeof (struct acl_subject_label *)) <= PAGE_SIZE)
82278+ kfree(r->subj_hash);
82279+ else
82280+ vfree(r->subj_hash);
82281+ r->subj_hash = NULL;
82282+next_role:
82283+ FOR_EACH_ROLE_END(r)
82284+
82285+ acl_free_all();
82286+
82287+ if (polstate->acl_role_set.r_hash) {
82288+ if ((polstate->acl_role_set.r_size * sizeof (struct acl_role_label *)) <=
82289+ PAGE_SIZE)
82290+ kfree(polstate->acl_role_set.r_hash);
82291+ else
82292+ vfree(polstate->acl_role_set.r_hash);
82293+ }
82294+ if (polstate->name_set.n_hash) {
82295+ if ((polstate->name_set.n_size * sizeof (struct name_entry *)) <=
82296+ PAGE_SIZE)
82297+ kfree(polstate->name_set.n_hash);
82298+ else
82299+ vfree(polstate->name_set.n_hash);
82300+ }
82301+
82302+ if (polstate->inodev_set.i_hash) {
82303+ if ((polstate->inodev_set.i_size * sizeof (struct inodev_entry *)) <=
82304+ PAGE_SIZE)
82305+ kfree(polstate->inodev_set.i_hash);
82306+ else
82307+ vfree(polstate->inodev_set.i_hash);
82308+ }
82309+
82310+ if (!reload)
82311+ gr_free_uidset();
82312+
82313+ memset(&polstate->name_set, 0, sizeof (struct name_db));
82314+ memset(&polstate->inodev_set, 0, sizeof (struct inodev_db));
82315+ memset(&polstate->acl_role_set, 0, sizeof (struct acl_role_db));
82316+ memset(&polstate->subj_map_set, 0, sizeof (struct acl_subj_map_db));
82317+
82318+ polstate->default_role = NULL;
82319+ polstate->kernel_role = NULL;
82320+ polstate->role_list = NULL;
82321+
82322+ return;
82323+}
82324+
82325+static struct acl_subject_label *
82326+do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role, int *already_copied);
82327+
82328+static int alloc_and_copy_string(char **name, unsigned int maxlen)
82329+{
82330+ unsigned int len = strnlen_user(*name, maxlen);
82331+ char *tmp;
82332+
82333+ if (!len || len >= maxlen)
82334+ return -EINVAL;
82335+
82336+ if ((tmp = (char *) acl_alloc(len)) == NULL)
82337+ return -ENOMEM;
82338+
82339+ if (copy_from_user(tmp, *name, len))
82340+ return -EFAULT;
82341+
82342+ tmp[len-1] = '\0';
82343+ *name = tmp;
82344+
82345+ return 0;
82346+}
82347+
82348+static int
82349+copy_user_glob(struct acl_object_label *obj)
82350+{
82351+ struct acl_object_label *g_tmp, **guser;
82352+ int error;
82353+
82354+ if (obj->globbed == NULL)
82355+ return 0;
82356+
82357+ guser = &obj->globbed;
82358+ while (*guser) {
82359+ g_tmp = (struct acl_object_label *)
82360+ acl_alloc(sizeof (struct acl_object_label));
82361+ if (g_tmp == NULL)
82362+ return -ENOMEM;
82363+
82364+ if (copy_acl_object_label(g_tmp, *guser))
82365+ return -EFAULT;
82366+
82367+ error = alloc_and_copy_string(&g_tmp->filename, PATH_MAX);
82368+ if (error)
82369+ return error;
82370+
82371+ *guser = g_tmp;
82372+ guser = &(g_tmp->next);
82373+ }
82374+
82375+ return 0;
82376+}
82377+
82378+static int
82379+copy_user_objs(struct acl_object_label *userp, struct acl_subject_label *subj,
82380+ struct acl_role_label *role)
82381+{
82382+ struct acl_object_label *o_tmp;
82383+ int ret;
82384+
82385+ while (userp) {
82386+ if ((o_tmp = (struct acl_object_label *)
82387+ acl_alloc(sizeof (struct acl_object_label))) == NULL)
82388+ return -ENOMEM;
82389+
82390+ if (copy_acl_object_label(o_tmp, userp))
82391+ return -EFAULT;
82392+
82393+ userp = o_tmp->prev;
82394+
82395+ ret = alloc_and_copy_string(&o_tmp->filename, PATH_MAX);
82396+ if (ret)
82397+ return ret;
82398+
82399+ insert_acl_obj_label(o_tmp, subj);
82400+ if (!insert_name_entry(o_tmp->filename, o_tmp->inode,
82401+ o_tmp->device, (o_tmp->mode & GR_DELETED) ? 1 : 0))
82402+ return -ENOMEM;
82403+
82404+ ret = copy_user_glob(o_tmp);
82405+ if (ret)
82406+ return ret;
82407+
82408+ if (o_tmp->nested) {
82409+ int already_copied;
82410+
82411+ o_tmp->nested = do_copy_user_subj(o_tmp->nested, role, &already_copied);
82412+ if (IS_ERR(o_tmp->nested))
82413+ return PTR_ERR(o_tmp->nested);
82414+
82415+ /* insert into nested subject list if we haven't copied this one yet
82416+ to prevent duplicate entries */
82417+ if (!already_copied) {
82418+ o_tmp->nested->next = role->hash->first;
82419+ role->hash->first = o_tmp->nested;
82420+ }
82421+ }
82422+ }
82423+
82424+ return 0;
82425+}
82426+
82427+static __u32
82428+count_user_subjs(struct acl_subject_label *userp)
82429+{
82430+ struct acl_subject_label s_tmp;
82431+ __u32 num = 0;
82432+
82433+ while (userp) {
82434+ if (copy_acl_subject_label(&s_tmp, userp))
82435+ break;
82436+
82437+ userp = s_tmp.prev;
82438+ }
82439+
82440+ return num;
82441+}
82442+
82443+static int
82444+copy_user_allowedips(struct acl_role_label *rolep)
82445+{
82446+ struct role_allowed_ip *ruserip, *rtmp = NULL, *rlast;
82447+
82448+ ruserip = rolep->allowed_ips;
82449+
82450+ while (ruserip) {
82451+ rlast = rtmp;
82452+
82453+ if ((rtmp = (struct role_allowed_ip *)
82454+ acl_alloc(sizeof (struct role_allowed_ip))) == NULL)
82455+ return -ENOMEM;
82456+
82457+ if (copy_role_allowed_ip(rtmp, ruserip))
82458+ return -EFAULT;
82459+
82460+ ruserip = rtmp->prev;
82461+
82462+ if (!rlast) {
82463+ rtmp->prev = NULL;
82464+ rolep->allowed_ips = rtmp;
82465+ } else {
82466+ rlast->next = rtmp;
82467+ rtmp->prev = rlast;
82468+ }
82469+
82470+ if (!ruserip)
82471+ rtmp->next = NULL;
82472+ }
82473+
82474+ return 0;
82475+}
82476+
82477+static int
82478+copy_user_transitions(struct acl_role_label *rolep)
82479+{
82480+ struct role_transition *rusertp, *rtmp = NULL, *rlast;
82481+ int error;
82482+
82483+ rusertp = rolep->transitions;
82484+
82485+ while (rusertp) {
82486+ rlast = rtmp;
82487+
82488+ if ((rtmp = (struct role_transition *)
82489+ acl_alloc(sizeof (struct role_transition))) == NULL)
82490+ return -ENOMEM;
82491+
82492+ if (copy_role_transition(rtmp, rusertp))
82493+ return -EFAULT;
82494+
82495+ rusertp = rtmp->prev;
82496+
82497+ error = alloc_and_copy_string(&rtmp->rolename, GR_SPROLE_LEN);
82498+ if (error)
82499+ return error;
82500+
82501+ if (!rlast) {
82502+ rtmp->prev = NULL;
82503+ rolep->transitions = rtmp;
82504+ } else {
82505+ rlast->next = rtmp;
82506+ rtmp->prev = rlast;
82507+ }
82508+
82509+ if (!rusertp)
82510+ rtmp->next = NULL;
82511+ }
82512+
82513+ return 0;
82514+}
82515+
82516+static __u32 count_user_objs(const struct acl_object_label __user *userp)
82517+{
82518+ struct acl_object_label o_tmp;
82519+ __u32 num = 0;
82520+
82521+ while (userp) {
82522+ if (copy_acl_object_label(&o_tmp, userp))
82523+ break;
82524+
82525+ userp = o_tmp.prev;
82526+ num++;
82527+ }
82528+
82529+ return num;
82530+}
82531+
82532+static struct acl_subject_label *
82533+do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role, int *already_copied)
82534+{
82535+ struct acl_subject_label *s_tmp = NULL, *s_tmp2;
82536+ __u32 num_objs;
82537+ struct acl_ip_label **i_tmp, *i_utmp2;
82538+ struct gr_hash_struct ghash;
82539+ struct subject_map *subjmap;
82540+ unsigned int i_num;
82541+ int err;
82542+
82543+ if (already_copied != NULL)
82544+ *already_copied = 0;
82545+
82546+ s_tmp = lookup_subject_map(userp);
82547+
82548+ /* we've already copied this subject into the kernel, just return
82549+ the reference to it, and don't copy it over again
82550+ */
82551+ if (s_tmp) {
82552+ if (already_copied != NULL)
82553+ *already_copied = 1;
82554+ return(s_tmp);
82555+ }
82556+
82557+ if ((s_tmp = (struct acl_subject_label *)
82558+ acl_alloc(sizeof (struct acl_subject_label))) == NULL)
82559+ return ERR_PTR(-ENOMEM);
82560+
82561+ subjmap = (struct subject_map *)kmalloc(sizeof (struct subject_map), GFP_KERNEL);
82562+ if (subjmap == NULL)
82563+ return ERR_PTR(-ENOMEM);
82564+
82565+ subjmap->user = userp;
82566+ subjmap->kernel = s_tmp;
82567+ insert_subj_map_entry(subjmap);
82568+
82569+ if (copy_acl_subject_label(s_tmp, userp))
82570+ return ERR_PTR(-EFAULT);
82571+
82572+ err = alloc_and_copy_string(&s_tmp->filename, PATH_MAX);
82573+ if (err)
82574+ return ERR_PTR(err);
82575+
82576+ if (!strcmp(s_tmp->filename, "/"))
82577+ role->root_label = s_tmp;
82578+
82579+ if (copy_gr_hash_struct(&ghash, s_tmp->hash))
82580+ return ERR_PTR(-EFAULT);
82581+
82582+ /* copy user and group transition tables */
82583+
82584+ if (s_tmp->user_trans_num) {
82585+ uid_t *uidlist;
82586+
82587+ uidlist = (uid_t *)acl_alloc_num(s_tmp->user_trans_num, sizeof(uid_t));
82588+ if (uidlist == NULL)
82589+ return ERR_PTR(-ENOMEM);
82590+ if (copy_from_user(uidlist, s_tmp->user_transitions, s_tmp->user_trans_num * sizeof(uid_t)))
82591+ return ERR_PTR(-EFAULT);
82592+
82593+ s_tmp->user_transitions = uidlist;
82594+ }
82595+
82596+ if (s_tmp->group_trans_num) {
82597+ gid_t *gidlist;
82598+
82599+ gidlist = (gid_t *)acl_alloc_num(s_tmp->group_trans_num, sizeof(gid_t));
82600+ if (gidlist == NULL)
82601+ return ERR_PTR(-ENOMEM);
82602+ if (copy_from_user(gidlist, s_tmp->group_transitions, s_tmp->group_trans_num * sizeof(gid_t)))
82603+ return ERR_PTR(-EFAULT);
82604+
82605+ s_tmp->group_transitions = gidlist;
82606+ }
82607+
82608+ /* set up object hash table */
82609+ num_objs = count_user_objs(ghash.first);
82610+
82611+ s_tmp->obj_hash_size = num_objs;
82612+ s_tmp->obj_hash =
82613+ (struct acl_object_label **)
82614+ create_table(&(s_tmp->obj_hash_size), sizeof(void *));
82615+
82616+ if (!s_tmp->obj_hash)
82617+ return ERR_PTR(-ENOMEM);
82618+
82619+ memset(s_tmp->obj_hash, 0,
82620+ s_tmp->obj_hash_size *
82621+ sizeof (struct acl_object_label *));
82622+
82623+ /* add in objects */
82624+ err = copy_user_objs(ghash.first, s_tmp, role);
82625+
82626+ if (err)
82627+ return ERR_PTR(err);
82628+
82629+ /* set pointer for parent subject */
82630+ if (s_tmp->parent_subject) {
82631+ s_tmp2 = do_copy_user_subj(s_tmp->parent_subject, role, NULL);
82632+
82633+ if (IS_ERR(s_tmp2))
82634+ return s_tmp2;
82635+
82636+ s_tmp->parent_subject = s_tmp2;
82637+ }
82638+
82639+ /* add in ip acls */
82640+
82641+ if (!s_tmp->ip_num) {
82642+ s_tmp->ips = NULL;
82643+ goto insert;
82644+ }
82645+
82646+ i_tmp =
82647+ (struct acl_ip_label **) acl_alloc_num(s_tmp->ip_num,
82648+ sizeof (struct acl_ip_label *));
82649+
82650+ if (!i_tmp)
82651+ return ERR_PTR(-ENOMEM);
82652+
82653+ for (i_num = 0; i_num < s_tmp->ip_num; i_num++) {
82654+ *(i_tmp + i_num) =
82655+ (struct acl_ip_label *)
82656+ acl_alloc(sizeof (struct acl_ip_label));
82657+ if (!*(i_tmp + i_num))
82658+ return ERR_PTR(-ENOMEM);
82659+
82660+ if (copy_pointer_from_array(&i_utmp2, i_num, s_tmp->ips))
82661+ return ERR_PTR(-EFAULT);
82662+
82663+ if (copy_acl_ip_label(*(i_tmp + i_num), i_utmp2))
82664+ return ERR_PTR(-EFAULT);
82665+
82666+ if ((*(i_tmp + i_num))->iface == NULL)
82667+ continue;
82668+
82669+ err = alloc_and_copy_string(&(*(i_tmp + i_num))->iface, IFNAMSIZ);
82670+ if (err)
82671+ return ERR_PTR(err);
82672+ }
82673+
82674+ s_tmp->ips = i_tmp;
82675+
82676+insert:
82677+ if (!insert_name_entry(s_tmp->filename, s_tmp->inode,
82678+ s_tmp->device, (s_tmp->mode & GR_DELETED) ? 1 : 0))
82679+ return ERR_PTR(-ENOMEM);
82680+
82681+ return s_tmp;
82682+}
82683+
82684+static int
82685+copy_user_subjs(struct acl_subject_label *userp, struct acl_role_label *role)
82686+{
82687+ struct acl_subject_label s_pre;
82688+ struct acl_subject_label * ret;
82689+ int err;
82690+
82691+ while (userp) {
82692+ if (copy_acl_subject_label(&s_pre, userp))
82693+ return -EFAULT;
82694+
82695+ ret = do_copy_user_subj(userp, role, NULL);
82696+
82697+ err = PTR_ERR(ret);
82698+ if (IS_ERR(ret))
82699+ return err;
82700+
82701+ insert_acl_subj_label(ret, role);
82702+
82703+ userp = s_pre.prev;
82704+ }
82705+
82706+ return 0;
82707+}
82708+
82709+static int
82710+copy_user_acl(struct gr_arg *arg)
82711+{
82712+ struct acl_role_label *r_tmp = NULL, **r_utmp, *r_utmp2;
82713+ struct acl_subject_label *subj_list;
82714+ struct sprole_pw *sptmp;
82715+ struct gr_hash_struct *ghash;
82716+ uid_t *domainlist;
82717+ unsigned int r_num;
82718+ int err = 0;
82719+ __u16 i;
82720+ __u32 num_subjs;
82721+
82722+ /* we need a default and kernel role */
82723+ if (arg->role_db.num_roles < 2)
82724+ return -EINVAL;
82725+
82726+ /* copy special role authentication info from userspace */
82727+
82728+ polstate->num_sprole_pws = arg->num_sprole_pws;
82729+ polstate->acl_special_roles = (struct sprole_pw **) acl_alloc_num(polstate->num_sprole_pws, sizeof(struct sprole_pw *));
82730+
82731+ if (!polstate->acl_special_roles && polstate->num_sprole_pws)
82732+ return -ENOMEM;
82733+
82734+ for (i = 0; i < polstate->num_sprole_pws; i++) {
82735+ sptmp = (struct sprole_pw *) acl_alloc(sizeof(struct sprole_pw));
82736+ if (!sptmp)
82737+ return -ENOMEM;
82738+ if (copy_sprole_pw(sptmp, i, arg->sprole_pws))
82739+ return -EFAULT;
82740+
82741+ err = alloc_and_copy_string((char **)&sptmp->rolename, GR_SPROLE_LEN);
82742+ if (err)
82743+ return err;
82744+
82745+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
82746+ printk(KERN_ALERT "Copying special role %s\n", sptmp->rolename);
82747+#endif
82748+
82749+ polstate->acl_special_roles[i] = sptmp;
82750+ }
82751+
82752+ r_utmp = (struct acl_role_label **) arg->role_db.r_table;
82753+
82754+ for (r_num = 0; r_num < arg->role_db.num_roles; r_num++) {
82755+ r_tmp = acl_alloc(sizeof (struct acl_role_label));
82756+
82757+ if (!r_tmp)
82758+ return -ENOMEM;
82759+
82760+ if (copy_pointer_from_array(&r_utmp2, r_num, r_utmp))
82761+ return -EFAULT;
82762+
82763+ if (copy_acl_role_label(r_tmp, r_utmp2))
82764+ return -EFAULT;
82765+
82766+ err = alloc_and_copy_string(&r_tmp->rolename, GR_SPROLE_LEN);
82767+ if (err)
82768+ return err;
82769+
82770+ if (!strcmp(r_tmp->rolename, "default")
82771+ && (r_tmp->roletype & GR_ROLE_DEFAULT)) {
82772+ polstate->default_role = r_tmp;
82773+ } else if (!strcmp(r_tmp->rolename, ":::kernel:::")) {
82774+ polstate->kernel_role = r_tmp;
82775+ }
82776+
82777+ if ((ghash = (struct gr_hash_struct *) acl_alloc(sizeof(struct gr_hash_struct))) == NULL)
82778+ return -ENOMEM;
82779+
82780+ if (copy_gr_hash_struct(ghash, r_tmp->hash))
82781+ return -EFAULT;
82782+
82783+ r_tmp->hash = ghash;
82784+
82785+ num_subjs = count_user_subjs(r_tmp->hash->first);
82786+
82787+ r_tmp->subj_hash_size = num_subjs;
82788+ r_tmp->subj_hash =
82789+ (struct acl_subject_label **)
82790+ create_table(&(r_tmp->subj_hash_size), sizeof(void *));
82791+
82792+ if (!r_tmp->subj_hash)
82793+ return -ENOMEM;
82794+
82795+ err = copy_user_allowedips(r_tmp);
82796+ if (err)
82797+ return err;
82798+
82799+ /* copy domain info */
82800+ if (r_tmp->domain_children != NULL) {
82801+ domainlist = acl_alloc_num(r_tmp->domain_child_num, sizeof(uid_t));
82802+ if (domainlist == NULL)
82803+ return -ENOMEM;
82804+
82805+ if (copy_from_user(domainlist, r_tmp->domain_children, r_tmp->domain_child_num * sizeof(uid_t)))
82806+ return -EFAULT;
82807+
82808+ r_tmp->domain_children = domainlist;
82809+ }
82810+
82811+ err = copy_user_transitions(r_tmp);
82812+ if (err)
82813+ return err;
82814+
82815+ memset(r_tmp->subj_hash, 0,
82816+ r_tmp->subj_hash_size *
82817+ sizeof (struct acl_subject_label *));
82818+
82819+ /* acquire the list of subjects, then NULL out
82820+ the list prior to parsing the subjects for this role,
82821+ as during this parsing the list is replaced with a list
82822+ of *nested* subjects for the role
82823+ */
82824+ subj_list = r_tmp->hash->first;
82825+
82826+ /* set nested subject list to null */
82827+ r_tmp->hash->first = NULL;
82828+
82829+ err = copy_user_subjs(subj_list, r_tmp);
82830+
82831+ if (err)
82832+ return err;
82833+
82834+ insert_acl_role_label(r_tmp);
82835+ }
82836+
82837+ if (polstate->default_role == NULL || polstate->kernel_role == NULL)
82838+ return -EINVAL;
82839+
82840+ return err;
82841+}
82842+
82843+static int gracl_reload_apply_policies(void *reload)
82844+{
82845+ struct gr_reload_state *reload_state = (struct gr_reload_state *)reload;
82846+ struct task_struct *task, *task2;
82847+ struct acl_role_label *role, *rtmp;
82848+ struct acl_subject_label *subj;
82849+ const struct cred *cred;
82850+ int role_applied;
82851+ int ret = 0;
82852+
82853+ memcpy(&reload_state->oldpolicy, reload_state->oldpolicy_ptr, sizeof(struct gr_policy_state));
82854+ memcpy(&reload_state->oldalloc, reload_state->oldalloc_ptr, sizeof(struct gr_alloc_state));
82855+
82856+ /* first make sure we'll be able to apply the new policy cleanly */
82857+ do_each_thread(task2, task) {
82858+ if (task->exec_file == NULL)
82859+ continue;
82860+ role_applied = 0;
82861+ if (!reload_state->oldmode && task->role->roletype & GR_ROLE_SPECIAL) {
82862+ /* preserve special roles */
82863+ FOR_EACH_ROLE_START(role)
82864+ if ((role->roletype & GR_ROLE_SPECIAL) && !strcmp(task->role->rolename, role->rolename)) {
82865+ rtmp = task->role;
82866+ task->role = role;
82867+ role_applied = 1;
82868+ break;
82869+ }
82870+ FOR_EACH_ROLE_END(role)
82871+ }
82872+ if (!role_applied) {
82873+ cred = __task_cred(task);
82874+ rtmp = task->role;
82875+ task->role = __lookup_acl_role_label(polstate, task, GR_GLOBAL_UID(cred->uid), GR_GLOBAL_GID(cred->gid));
82876+ }
82877+ /* this handles non-nested inherited subjects, nested subjects will still
82878+ be dropped currently */
82879+ subj = __gr_get_subject_for_task(polstate, task, task->acl->filename, 1);
82880+ task->tmpacl = __gr_get_subject_for_task(polstate, task, NULL, 1);
82881+ /* change the role back so that we've made no modifications to the policy */
82882+ task->role = rtmp;
82883+
82884+ if (subj == NULL || task->tmpacl == NULL) {
82885+ ret = -EINVAL;
82886+ goto out;
82887+ }
82888+ } while_each_thread(task2, task);
82889+
82890+ /* now actually apply the policy */
82891+
82892+ do_each_thread(task2, task) {
82893+ if (task->exec_file) {
82894+ role_applied = 0;
82895+ if (!reload_state->oldmode && task->role->roletype & GR_ROLE_SPECIAL) {
82896+ /* preserve special roles */
82897+ FOR_EACH_ROLE_START(role)
82898+ if ((role->roletype & GR_ROLE_SPECIAL) && !strcmp(task->role->rolename, role->rolename)) {
82899+ task->role = role;
82900+ role_applied = 1;
82901+ break;
82902+ }
82903+ FOR_EACH_ROLE_END(role)
82904+ }
82905+ if (!role_applied) {
82906+ cred = __task_cred(task);
82907+ task->role = __lookup_acl_role_label(polstate, task, GR_GLOBAL_UID(cred->uid), GR_GLOBAL_GID(cred->gid));
82908+ }
82909+ /* this handles non-nested inherited subjects, nested subjects will still
82910+ be dropped currently */
82911+ if (!reload_state->oldmode && task->inherited)
82912+ subj = __gr_get_subject_for_task(polstate, task, task->acl->filename, 1);
82913+ else {
82914+ /* looked up and tagged to the task previously */
82915+ subj = task->tmpacl;
82916+ }
82917+ /* subj will be non-null */
82918+ __gr_apply_subject_to_task(polstate, task, subj);
82919+ if (reload_state->oldmode) {
82920+ task->acl_role_id = 0;
82921+ task->acl_sp_role = 0;
82922+ task->inherited = 0;
82923+ }
82924+ } else {
82925+ // it's a kernel process
82926+ task->role = polstate->kernel_role;
82927+ task->acl = polstate->kernel_role->root_label;
82928+#ifdef CONFIG_GRKERNSEC_ACL_HIDEKERN
82929+ task->acl->mode &= ~GR_PROCFIND;
82930+#endif
82931+ }
82932+ } while_each_thread(task2, task);
82933+
82934+ memcpy(reload_state->oldpolicy_ptr, &reload_state->newpolicy, sizeof(struct gr_policy_state));
82935+ memcpy(reload_state->oldalloc_ptr, &reload_state->newalloc, sizeof(struct gr_alloc_state));
82936+
82937+out:
82938+
82939+ return ret;
82940+}
82941+
82942+static int gracl_reload(struct gr_arg *args, unsigned char oldmode)
82943+{
82944+ struct gr_reload_state new_reload_state = { };
82945+ int err;
82946+
82947+ new_reload_state.oldpolicy_ptr = polstate;
82948+ new_reload_state.oldalloc_ptr = current_alloc_state;
82949+ new_reload_state.oldmode = oldmode;
82950+
82951+ current_alloc_state = &new_reload_state.newalloc;
82952+ polstate = &new_reload_state.newpolicy;
82953+
82954+ /* everything relevant is now saved off, copy in the new policy */
82955+ if (init_variables(args, true)) {
82956+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_INITF_ACL_MSG, GR_VERSION);
82957+ err = -ENOMEM;
82958+ goto error;
82959+ }
82960+
82961+ err = copy_user_acl(args);
82962+ free_init_variables();
82963+ if (err)
82964+ goto error;
82965+ /* the new policy is copied in, with the old policy available via saved_state
82966+ first go through applying roles, making sure to preserve special roles
82967+ then apply new subjects, making sure to preserve inherited and nested subjects,
82968+ though currently only inherited subjects will be preserved
82969+ */
82970+ err = stop_machine(gracl_reload_apply_policies, &new_reload_state, NULL);
82971+ if (err)
82972+ goto error;
82973+
82974+ /* we've now applied the new policy, so restore the old policy state to free it */
82975+ polstate = &new_reload_state.oldpolicy;
82976+ current_alloc_state = &new_reload_state.oldalloc;
82977+ free_variables(true);
82978+
82979+ /* oldpolicy/oldalloc_ptr point to the new policy/alloc states as they were copied
82980+ to running_polstate/current_alloc_state inside stop_machine
82981+ */
82982+ err = 0;
82983+ goto out;
82984+error:
82985+ /* on error of loading the new policy, we'll just keep the previous
82986+ policy set around
82987+ */
82988+ free_variables(true);
82989+
82990+ /* doesn't affect runtime, but maintains consistent state */
82991+out:
82992+ polstate = new_reload_state.oldpolicy_ptr;
82993+ current_alloc_state = new_reload_state.oldalloc_ptr;
82994+
82995+ return err;
82996+}
82997+
82998+static int
82999+gracl_init(struct gr_arg *args)
83000+{
83001+ int error = 0;
83002+
83003+ memcpy(gr_system_salt, args->salt, GR_SALT_LEN);
83004+ memcpy(gr_system_sum, args->sum, GR_SHA_LEN);
83005+
83006+ if (init_variables(args, false)) {
83007+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_INITF_ACL_MSG, GR_VERSION);
83008+ error = -ENOMEM;
83009+ goto out;
83010+ }
83011+
83012+ error = copy_user_acl(args);
83013+ free_init_variables();
83014+ if (error)
83015+ goto out;
83016+
83017+ error = gr_set_acls(0);
83018+ if (error)
83019+ goto out;
83020+
83021+ gr_enable_rbac_system();
83022+
83023+ return 0;
83024+
83025+out:
83026+ free_variables(false);
83027+ return error;
83028+}
83029+
83030+static int
83031+lookup_special_role_auth(__u16 mode, const char *rolename, unsigned char **salt,
83032+ unsigned char **sum)
83033+{
83034+ struct acl_role_label *r;
83035+ struct role_allowed_ip *ipp;
83036+ struct role_transition *trans;
83037+ unsigned int i;
83038+ int found = 0;
83039+ u32 curr_ip = current->signal->curr_ip;
83040+
83041+ current->signal->saved_ip = curr_ip;
83042+
83043+ /* check transition table */
83044+
83045+ for (trans = current->role->transitions; trans; trans = trans->next) {
83046+ if (!strcmp(rolename, trans->rolename)) {
83047+ found = 1;
83048+ break;
83049+ }
83050+ }
83051+
83052+ if (!found)
83053+ return 0;
83054+
83055+ /* handle special roles that do not require authentication
83056+ and check ip */
83057+
83058+ FOR_EACH_ROLE_START(r)
83059+ if (!strcmp(rolename, r->rolename) &&
83060+ (r->roletype & GR_ROLE_SPECIAL)) {
83061+ found = 0;
83062+ if (r->allowed_ips != NULL) {
83063+ for (ipp = r->allowed_ips; ipp; ipp = ipp->next) {
83064+ if ((ntohl(curr_ip) & ipp->netmask) ==
83065+ (ntohl(ipp->addr) & ipp->netmask))
83066+ found = 1;
83067+ }
83068+ } else
83069+ found = 2;
83070+ if (!found)
83071+ return 0;
83072+
83073+ if (((mode == GR_SPROLE) && (r->roletype & GR_ROLE_NOPW)) ||
83074+ ((mode == GR_SPROLEPAM) && (r->roletype & GR_ROLE_PAM))) {
83075+ *salt = NULL;
83076+ *sum = NULL;
83077+ return 1;
83078+ }
83079+ }
83080+ FOR_EACH_ROLE_END(r)
83081+
83082+ for (i = 0; i < polstate->num_sprole_pws; i++) {
83083+ if (!strcmp(rolename, polstate->acl_special_roles[i]->rolename)) {
83084+ *salt = polstate->acl_special_roles[i]->salt;
83085+ *sum = polstate->acl_special_roles[i]->sum;
83086+ return 1;
83087+ }
83088+ }
83089+
83090+ return 0;
83091+}
83092+
83093+int gr_check_secure_terminal(struct task_struct *task)
83094+{
83095+ struct task_struct *p, *p2, *p3;
83096+ struct files_struct *files;
83097+ struct fdtable *fdt;
83098+ struct file *our_file = NULL, *file;
83099+ int i;
83100+
83101+ if (task->signal->tty == NULL)
83102+ return 1;
83103+
83104+ files = get_files_struct(task);
83105+ if (files != NULL) {
83106+ rcu_read_lock();
83107+ fdt = files_fdtable(files);
83108+ for (i=0; i < fdt->max_fds; i++) {
83109+ file = fcheck_files(files, i);
83110+ if (file && (our_file == NULL) && (file->private_data == task->signal->tty)) {
83111+ get_file(file);
83112+ our_file = file;
83113+ }
83114+ }
83115+ rcu_read_unlock();
83116+ put_files_struct(files);
83117+ }
83118+
83119+ if (our_file == NULL)
83120+ return 1;
83121+
83122+ read_lock(&tasklist_lock);
83123+ do_each_thread(p2, p) {
83124+ files = get_files_struct(p);
83125+ if (files == NULL ||
83126+ (p->signal && p->signal->tty == task->signal->tty)) {
83127+ if (files != NULL)
83128+ put_files_struct(files);
83129+ continue;
83130+ }
83131+ rcu_read_lock();
83132+ fdt = files_fdtable(files);
83133+ for (i=0; i < fdt->max_fds; i++) {
83134+ file = fcheck_files(files, i);
83135+ if (file && S_ISCHR(file->f_path.dentry->d_inode->i_mode) &&
83136+ file->f_path.dentry->d_inode->i_rdev == our_file->f_path.dentry->d_inode->i_rdev) {
83137+ p3 = task;
83138+ while (task_pid_nr(p3) > 0) {
83139+ if (p3 == p)
83140+ break;
83141+ p3 = p3->real_parent;
83142+ }
83143+ if (p3 == p)
83144+ break;
83145+ gr_log_ttysniff(GR_DONT_AUDIT_GOOD, GR_TTYSNIFF_ACL_MSG, p);
83146+ gr_handle_alertkill(p);
83147+ rcu_read_unlock();
83148+ put_files_struct(files);
83149+ read_unlock(&tasklist_lock);
83150+ fput(our_file);
83151+ return 0;
83152+ }
83153+ }
83154+ rcu_read_unlock();
83155+ put_files_struct(files);
83156+ } while_each_thread(p2, p);
83157+ read_unlock(&tasklist_lock);
83158+
83159+ fput(our_file);
83160+ return 1;
83161+}
83162+
83163+ssize_t
83164+write_grsec_handler(struct file *file, const char __user * buf, size_t count, loff_t *ppos)
83165+{
83166+ struct gr_arg_wrapper uwrap;
83167+ unsigned char *sprole_salt = NULL;
83168+ unsigned char *sprole_sum = NULL;
83169+ int error = 0;
83170+ int error2 = 0;
83171+ size_t req_count = 0;
83172+ unsigned char oldmode = 0;
83173+
83174+ mutex_lock(&gr_dev_mutex);
83175+
83176+ if (gr_acl_is_enabled() && !(current->acl->mode & GR_KERNELAUTH)) {
83177+ error = -EPERM;
83178+ goto out;
83179+ }
83180+
83181+#ifdef CONFIG_COMPAT
83182+ pax_open_kernel();
83183+ if (is_compat_task()) {
83184+ copy_gr_arg_wrapper = &copy_gr_arg_wrapper_compat;
83185+ copy_gr_arg = &copy_gr_arg_compat;
83186+ copy_acl_object_label = &copy_acl_object_label_compat;
83187+ copy_acl_subject_label = &copy_acl_subject_label_compat;
83188+ copy_acl_role_label = &copy_acl_role_label_compat;
83189+ copy_acl_ip_label = &copy_acl_ip_label_compat;
83190+ copy_role_allowed_ip = &copy_role_allowed_ip_compat;
83191+ copy_role_transition = &copy_role_transition_compat;
83192+ copy_sprole_pw = &copy_sprole_pw_compat;
83193+ copy_gr_hash_struct = &copy_gr_hash_struct_compat;
83194+ copy_pointer_from_array = &copy_pointer_from_array_compat;
83195+ get_gr_arg_wrapper_size = &get_gr_arg_wrapper_size_compat;
83196+ } else {
83197+ copy_gr_arg_wrapper = &copy_gr_arg_wrapper_normal;
83198+ copy_gr_arg = &copy_gr_arg_normal;
83199+ copy_acl_object_label = &copy_acl_object_label_normal;
83200+ copy_acl_subject_label = &copy_acl_subject_label_normal;
83201+ copy_acl_role_label = &copy_acl_role_label_normal;
83202+ copy_acl_ip_label = &copy_acl_ip_label_normal;
83203+ copy_role_allowed_ip = &copy_role_allowed_ip_normal;
83204+ copy_role_transition = &copy_role_transition_normal;
83205+ copy_sprole_pw = &copy_sprole_pw_normal;
83206+ copy_gr_hash_struct = &copy_gr_hash_struct_normal;
83207+ copy_pointer_from_array = &copy_pointer_from_array_normal;
83208+ get_gr_arg_wrapper_size = &get_gr_arg_wrapper_size_normal;
83209+ }
83210+ pax_close_kernel();
83211+#endif
83212+
83213+ req_count = get_gr_arg_wrapper_size();
83214+
83215+ if (count != req_count) {
83216+ gr_log_int_int(GR_DONT_AUDIT_GOOD, GR_DEV_ACL_MSG, (int)count, (int)req_count);
83217+ error = -EINVAL;
83218+ goto out;
83219+ }
83220+
83221+
83222+ if (gr_auth_expires && time_after_eq(get_seconds(), gr_auth_expires)) {
83223+ gr_auth_expires = 0;
83224+ gr_auth_attempts = 0;
83225+ }
83226+
83227+ error = copy_gr_arg_wrapper(buf, &uwrap);
83228+ if (error)
83229+ goto out;
83230+
83231+ error = copy_gr_arg(uwrap.arg, gr_usermode);
83232+ if (error)
83233+ goto out;
83234+
83235+ if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_SPROLEPAM &&
83236+ gr_auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
83237+ time_after(gr_auth_expires, get_seconds())) {
83238+ error = -EBUSY;
83239+ goto out;
83240+ }
83241+
83242+ /* if non-root trying to do anything other than use a special role,
83243+ do not attempt authentication, do not count towards authentication
83244+ locking
83245+ */
83246+
83247+ if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_STATUS &&
83248+ gr_usermode->mode != GR_UNSPROLE && gr_usermode->mode != GR_SPROLEPAM &&
83249+ gr_is_global_nonroot(current_uid())) {
83250+ error = -EPERM;
83251+ goto out;
83252+ }
83253+
83254+ /* ensure pw and special role name are null terminated */
83255+
83256+ gr_usermode->pw[GR_PW_LEN - 1] = '\0';
83257+ gr_usermode->sp_role[GR_SPROLE_LEN - 1] = '\0';
83258+
83259+ /* Okay.
83260+ * We have our enough of the argument structure..(we have yet
83261+ * to copy_from_user the tables themselves) . Copy the tables
83262+ * only if we need them, i.e. for loading operations. */
83263+
83264+ switch (gr_usermode->mode) {
83265+ case GR_STATUS:
83266+ if (gr_acl_is_enabled()) {
83267+ error = 1;
83268+ if (!gr_check_secure_terminal(current))
83269+ error = 3;
83270+ } else
83271+ error = 2;
83272+ goto out;
83273+ case GR_SHUTDOWN:
83274+ if (gr_acl_is_enabled() && !(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
83275+ stop_machine(gr_rbac_disable, NULL, NULL);
83276+ free_variables(false);
83277+ memset(gr_usermode, 0, sizeof(struct gr_arg));
83278+ memset(gr_system_salt, 0, GR_SALT_LEN);
83279+ memset(gr_system_sum, 0, GR_SHA_LEN);
83280+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTS_ACL_MSG);
83281+ } else if (gr_acl_is_enabled()) {
83282+ gr_log_noargs(GR_DONT_AUDIT, GR_SHUTF_ACL_MSG);
83283+ error = -EPERM;
83284+ } else {
83285+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTI_ACL_MSG);
83286+ error = -EAGAIN;
83287+ }
83288+ break;
83289+ case GR_ENABLE:
83290+ if (!gr_acl_is_enabled() && !(error2 = gracl_init(gr_usermode)))
83291+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_ENABLE_ACL_MSG, GR_VERSION);
83292+ else {
83293+ if (gr_acl_is_enabled())
83294+ error = -EAGAIN;
83295+ else
83296+ error = error2;
83297+ gr_log_str(GR_DONT_AUDIT, GR_ENABLEF_ACL_MSG, GR_VERSION);
83298+ }
83299+ break;
83300+ case GR_OLDRELOAD:
83301+ oldmode = 1;
83302+ case GR_RELOAD:
83303+ if (!gr_acl_is_enabled()) {
83304+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOADI_ACL_MSG, GR_VERSION);
83305+ error = -EAGAIN;
83306+ } else if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
83307+ error2 = gracl_reload(gr_usermode, oldmode);
83308+ if (!error2)
83309+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOAD_ACL_MSG, GR_VERSION);
83310+ else {
83311+ gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
83312+ error = error2;
83313+ }
83314+ } else {
83315+ gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
83316+ error = -EPERM;
83317+ }
83318+ break;
83319+ case GR_SEGVMOD:
83320+ if (unlikely(!gr_acl_is_enabled())) {
83321+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODI_ACL_MSG);
83322+ error = -EAGAIN;
83323+ break;
83324+ }
83325+
83326+ if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
83327+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODS_ACL_MSG);
83328+ if (gr_usermode->segv_device && gr_usermode->segv_inode) {
83329+ struct acl_subject_label *segvacl;
83330+ segvacl =
83331+ lookup_acl_subj_label(gr_usermode->segv_inode,
83332+ gr_usermode->segv_device,
83333+ current->role);
83334+ if (segvacl) {
83335+ segvacl->crashes = 0;
83336+ segvacl->expires = 0;
83337+ }
83338+ } else if (gr_find_uid(gr_usermode->segv_uid) >= 0) {
83339+ gr_remove_uid(gr_usermode->segv_uid);
83340+ }
83341+ } else {
83342+ gr_log_noargs(GR_DONT_AUDIT, GR_SEGVMODF_ACL_MSG);
83343+ error = -EPERM;
83344+ }
83345+ break;
83346+ case GR_SPROLE:
83347+ case GR_SPROLEPAM:
83348+ if (unlikely(!gr_acl_is_enabled())) {
83349+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SPROLEI_ACL_MSG);
83350+ error = -EAGAIN;
83351+ break;
83352+ }
83353+
83354+ if (current->role->expires && time_after_eq(get_seconds(), current->role->expires)) {
83355+ current->role->expires = 0;
83356+ current->role->auth_attempts = 0;
83357+ }
83358+
83359+ if (current->role->auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
83360+ time_after(current->role->expires, get_seconds())) {
83361+ error = -EBUSY;
83362+ goto out;
83363+ }
83364+
83365+ if (lookup_special_role_auth
83366+ (gr_usermode->mode, gr_usermode->sp_role, &sprole_salt, &sprole_sum)
83367+ && ((!sprole_salt && !sprole_sum)
83368+ || !(chkpw(gr_usermode, sprole_salt, sprole_sum)))) {
83369+ char *p = "";
83370+ assign_special_role(gr_usermode->sp_role);
83371+ read_lock(&tasklist_lock);
83372+ if (current->real_parent)
83373+ p = current->real_parent->role->rolename;
83374+ read_unlock(&tasklist_lock);
83375+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLES_ACL_MSG,
83376+ p, acl_sp_role_value);
83377+ } else {
83378+ gr_log_str(GR_DONT_AUDIT, GR_SPROLEF_ACL_MSG, gr_usermode->sp_role);
83379+ error = -EPERM;
83380+ if(!(current->role->auth_attempts++))
83381+ current->role->expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
83382+
83383+ goto out;
83384+ }
83385+ break;
83386+ case GR_UNSPROLE:
83387+ if (unlikely(!gr_acl_is_enabled())) {
83388+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_UNSPROLEI_ACL_MSG);
83389+ error = -EAGAIN;
83390+ break;
83391+ }
83392+
83393+ if (current->role->roletype & GR_ROLE_SPECIAL) {
83394+ char *p = "";
83395+ int i = 0;
83396+
83397+ read_lock(&tasklist_lock);
83398+ if (current->real_parent) {
83399+ p = current->real_parent->role->rolename;
83400+ i = current->real_parent->acl_role_id;
83401+ }
83402+ read_unlock(&tasklist_lock);
83403+
83404+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_UNSPROLES_ACL_MSG, p, i);
83405+ gr_set_acls(1);
83406+ } else {
83407+ error = -EPERM;
83408+ goto out;
83409+ }
83410+ break;
83411+ default:
83412+ gr_log_int(GR_DONT_AUDIT, GR_INVMODE_ACL_MSG, gr_usermode->mode);
83413+ error = -EINVAL;
83414+ break;
83415+ }
83416+
83417+ if (error != -EPERM)
83418+ goto out;
83419+
83420+ if(!(gr_auth_attempts++))
83421+ gr_auth_expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
83422+
83423+ out:
83424+ mutex_unlock(&gr_dev_mutex);
83425+
83426+ if (!error)
83427+ error = req_count;
83428+
83429+ return error;
83430+}
83431+
83432+int
83433+gr_set_acls(const int type)
83434+{
83435+ struct task_struct *task, *task2;
83436+ struct acl_role_label *role = current->role;
83437+ struct acl_subject_label *subj;
83438+ __u16 acl_role_id = current->acl_role_id;
83439+ const struct cred *cred;
83440+ int ret;
83441+
83442+ rcu_read_lock();
83443+ read_lock(&tasklist_lock);
83444+ read_lock(&grsec_exec_file_lock);
83445+ do_each_thread(task2, task) {
83446+ /* check to see if we're called from the exit handler,
83447+ if so, only replace ACLs that have inherited the admin
83448+ ACL */
83449+
83450+ if (type && (task->role != role ||
83451+ task->acl_role_id != acl_role_id))
83452+ continue;
83453+
83454+ task->acl_role_id = 0;
83455+ task->acl_sp_role = 0;
83456+ task->inherited = 0;
83457+
83458+ if (task->exec_file) {
83459+ cred = __task_cred(task);
83460+ task->role = __lookup_acl_role_label(polstate, task, GR_GLOBAL_UID(cred->uid), GR_GLOBAL_GID(cred->gid));
83461+ subj = __gr_get_subject_for_task(polstate, task, NULL, 1);
83462+ if (subj == NULL) {
83463+ ret = -EINVAL;
83464+ read_unlock(&grsec_exec_file_lock);
83465+ read_unlock(&tasklist_lock);
83466+ rcu_read_unlock();
83467+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_DEFACL_MSG, task->comm, task_pid_nr(task));
83468+ return ret;
83469+ }
83470+ __gr_apply_subject_to_task(polstate, task, subj);
83471+ } else {
83472+ // it's a kernel process
83473+ task->role = polstate->kernel_role;
83474+ task->acl = polstate->kernel_role->root_label;
83475+#ifdef CONFIG_GRKERNSEC_ACL_HIDEKERN
83476+ task->acl->mode &= ~GR_PROCFIND;
83477+#endif
83478+ }
83479+ } while_each_thread(task2, task);
83480+ read_unlock(&grsec_exec_file_lock);
83481+ read_unlock(&tasklist_lock);
83482+ rcu_read_unlock();
83483+
83484+ return 0;
83485+}
83486diff --git a/grsecurity/gracl_res.c b/grsecurity/gracl_res.c
83487new file mode 100644
83488index 0000000..39645c9
83489--- /dev/null
83490+++ b/grsecurity/gracl_res.c
83491@@ -0,0 +1,68 @@
83492+#include <linux/kernel.h>
83493+#include <linux/sched.h>
83494+#include <linux/gracl.h>
83495+#include <linux/grinternal.h>
83496+
83497+static const char *restab_log[] = {
83498+ [RLIMIT_CPU] = "RLIMIT_CPU",
83499+ [RLIMIT_FSIZE] = "RLIMIT_FSIZE",
83500+ [RLIMIT_DATA] = "RLIMIT_DATA",
83501+ [RLIMIT_STACK] = "RLIMIT_STACK",
83502+ [RLIMIT_CORE] = "RLIMIT_CORE",
83503+ [RLIMIT_RSS] = "RLIMIT_RSS",
83504+ [RLIMIT_NPROC] = "RLIMIT_NPROC",
83505+ [RLIMIT_NOFILE] = "RLIMIT_NOFILE",
83506+ [RLIMIT_MEMLOCK] = "RLIMIT_MEMLOCK",
83507+ [RLIMIT_AS] = "RLIMIT_AS",
83508+ [RLIMIT_LOCKS] = "RLIMIT_LOCKS",
83509+ [RLIMIT_SIGPENDING] = "RLIMIT_SIGPENDING",
83510+ [RLIMIT_MSGQUEUE] = "RLIMIT_MSGQUEUE",
83511+ [RLIMIT_NICE] = "RLIMIT_NICE",
83512+ [RLIMIT_RTPRIO] = "RLIMIT_RTPRIO",
83513+ [RLIMIT_RTTIME] = "RLIMIT_RTTIME",
83514+ [GR_CRASH_RES] = "RLIMIT_CRASH"
83515+};
83516+
83517+void
83518+gr_log_resource(const struct task_struct *task,
83519+ const int res, const unsigned long wanted, const int gt)
83520+{
83521+ const struct cred *cred;
83522+ unsigned long rlim;
83523+
83524+ if (!gr_acl_is_enabled() && !grsec_resource_logging)
83525+ return;
83526+
83527+ // not yet supported resource
83528+ if (unlikely(!restab_log[res]))
83529+ return;
83530+
83531+ if (res == RLIMIT_CPU || res == RLIMIT_RTTIME)
83532+ rlim = task_rlimit_max(task, res);
83533+ else
83534+ rlim = task_rlimit(task, res);
83535+
83536+ if (likely((rlim == RLIM_INFINITY) || (gt && wanted <= rlim) || (!gt && wanted < rlim)))
83537+ return;
83538+
83539+ rcu_read_lock();
83540+ cred = __task_cred(task);
83541+
83542+ if (res == RLIMIT_NPROC &&
83543+ (cap_raised(cred->cap_effective, CAP_SYS_ADMIN) ||
83544+ cap_raised(cred->cap_effective, CAP_SYS_RESOURCE)))
83545+ goto out_rcu_unlock;
83546+ else if (res == RLIMIT_MEMLOCK &&
83547+ cap_raised(cred->cap_effective, CAP_IPC_LOCK))
83548+ goto out_rcu_unlock;
83549+ else if (res == RLIMIT_NICE && cap_raised(cred->cap_effective, CAP_SYS_NICE))
83550+ goto out_rcu_unlock;
83551+ rcu_read_unlock();
83552+
83553+ gr_log_res_ulong2_str(GR_DONT_AUDIT, GR_RESOURCE_MSG, task, wanted, restab_log[res], rlim);
83554+
83555+ return;
83556+out_rcu_unlock:
83557+ rcu_read_unlock();
83558+ return;
83559+}
83560diff --git a/grsecurity/gracl_segv.c b/grsecurity/gracl_segv.c
83561new file mode 100644
83562index 0000000..35d9e65
83563--- /dev/null
83564+++ b/grsecurity/gracl_segv.c
83565@@ -0,0 +1,324 @@
83566+#include <linux/kernel.h>
83567+#include <linux/mm.h>
83568+#include <asm/uaccess.h>
83569+#include <asm/errno.h>
83570+#include <asm/mman.h>
83571+#include <net/sock.h>
83572+#include <linux/file.h>
83573+#include <linux/fs.h>
83574+#include <linux/net.h>
83575+#include <linux/in.h>
83576+#include <linux/slab.h>
83577+#include <linux/types.h>
83578+#include <linux/sched.h>
83579+#include <linux/timer.h>
83580+#include <linux/gracl.h>
83581+#include <linux/grsecurity.h>
83582+#include <linux/grinternal.h>
83583+#if defined(CONFIG_BTRFS_FS) || defined(CONFIG_BTRFS_FS_MODULE)
83584+#include <linux/magic.h>
83585+#include <linux/pagemap.h>
83586+#include "../fs/btrfs/async-thread.h"
83587+#include "../fs/btrfs/ctree.h"
83588+#include "../fs/btrfs/btrfs_inode.h"
83589+#endif
83590+
83591+static struct crash_uid *uid_set;
83592+static unsigned short uid_used;
83593+static DEFINE_SPINLOCK(gr_uid_lock);
83594+extern rwlock_t gr_inode_lock;
83595+extern struct acl_subject_label *
83596+ lookup_acl_subj_label(const u64 inode, const dev_t dev,
83597+ struct acl_role_label *role);
83598+
83599+static inline dev_t __get_dev(const struct dentry *dentry)
83600+{
83601+#if defined(CONFIG_BTRFS_FS) || defined(CONFIG_BTRFS_FS_MODULE)
83602+ if (dentry->d_sb->s_magic == BTRFS_SUPER_MAGIC)
83603+ return BTRFS_I(dentry->d_inode)->root->anon_dev;
83604+ else
83605+#endif
83606+ return dentry->d_sb->s_dev;
83607+}
83608+
83609+static inline u64 __get_ino(const struct dentry *dentry)
83610+{
83611+#if defined(CONFIG_BTRFS_FS) || defined(CONFIG_BTRFS_FS_MODULE)
83612+ if (dentry->d_sb->s_magic == BTRFS_SUPER_MAGIC)
83613+ return btrfs_ino(dentry->d_inode);
83614+ else
83615+#endif
83616+ return dentry->d_inode->i_ino;
83617+}
83618+
83619+int
83620+gr_init_uidset(void)
83621+{
83622+ uid_set =
83623+ kmalloc(GR_UIDTABLE_MAX * sizeof (struct crash_uid), GFP_KERNEL);
83624+ uid_used = 0;
83625+
83626+ return uid_set ? 1 : 0;
83627+}
83628+
83629+void
83630+gr_free_uidset(void)
83631+{
83632+ if (uid_set) {
83633+ struct crash_uid *tmpset;
83634+ spin_lock(&gr_uid_lock);
83635+ tmpset = uid_set;
83636+ uid_set = NULL;
83637+ uid_used = 0;
83638+ spin_unlock(&gr_uid_lock);
83639+ if (tmpset)
83640+ kfree(tmpset);
83641+ }
83642+
83643+ return;
83644+}
83645+
83646+int
83647+gr_find_uid(const uid_t uid)
83648+{
83649+ struct crash_uid *tmp = uid_set;
83650+ uid_t buid;
83651+ int low = 0, high = uid_used - 1, mid;
83652+
83653+ while (high >= low) {
83654+ mid = (low + high) >> 1;
83655+ buid = tmp[mid].uid;
83656+ if (buid == uid)
83657+ return mid;
83658+ if (buid > uid)
83659+ high = mid - 1;
83660+ if (buid < uid)
83661+ low = mid + 1;
83662+ }
83663+
83664+ return -1;
83665+}
83666+
83667+static void
83668+gr_insertsort(void)
83669+{
83670+ unsigned short i, j;
83671+ struct crash_uid index;
83672+
83673+ for (i = 1; i < uid_used; i++) {
83674+ index = uid_set[i];
83675+ j = i;
83676+ while ((j > 0) && uid_set[j - 1].uid > index.uid) {
83677+ uid_set[j] = uid_set[j - 1];
83678+ j--;
83679+ }
83680+ uid_set[j] = index;
83681+ }
83682+
83683+ return;
83684+}
83685+
83686+static void
83687+gr_insert_uid(const kuid_t kuid, const unsigned long expires)
83688+{
83689+ int loc;
83690+ uid_t uid = GR_GLOBAL_UID(kuid);
83691+
83692+ if (uid_used == GR_UIDTABLE_MAX)
83693+ return;
83694+
83695+ loc = gr_find_uid(uid);
83696+
83697+ if (loc >= 0) {
83698+ uid_set[loc].expires = expires;
83699+ return;
83700+ }
83701+
83702+ uid_set[uid_used].uid = uid;
83703+ uid_set[uid_used].expires = expires;
83704+ uid_used++;
83705+
83706+ gr_insertsort();
83707+
83708+ return;
83709+}
83710+
83711+void
83712+gr_remove_uid(const unsigned short loc)
83713+{
83714+ unsigned short i;
83715+
83716+ for (i = loc + 1; i < uid_used; i++)
83717+ uid_set[i - 1] = uid_set[i];
83718+
83719+ uid_used--;
83720+
83721+ return;
83722+}
83723+
83724+int
83725+gr_check_crash_uid(const kuid_t kuid)
83726+{
83727+ int loc;
83728+ int ret = 0;
83729+ uid_t uid;
83730+
83731+ if (unlikely(!gr_acl_is_enabled()))
83732+ return 0;
83733+
83734+ uid = GR_GLOBAL_UID(kuid);
83735+
83736+ spin_lock(&gr_uid_lock);
83737+ loc = gr_find_uid(uid);
83738+
83739+ if (loc < 0)
83740+ goto out_unlock;
83741+
83742+ if (time_before_eq(uid_set[loc].expires, get_seconds()))
83743+ gr_remove_uid(loc);
83744+ else
83745+ ret = 1;
83746+
83747+out_unlock:
83748+ spin_unlock(&gr_uid_lock);
83749+ return ret;
83750+}
83751+
83752+static int
83753+proc_is_setxid(const struct cred *cred)
83754+{
83755+ if (!uid_eq(cred->uid, cred->euid) || !uid_eq(cred->uid, cred->suid) ||
83756+ !uid_eq(cred->uid, cred->fsuid))
83757+ return 1;
83758+ if (!gid_eq(cred->gid, cred->egid) || !gid_eq(cred->gid, cred->sgid) ||
83759+ !gid_eq(cred->gid, cred->fsgid))
83760+ return 1;
83761+
83762+ return 0;
83763+}
83764+
83765+extern int gr_fake_force_sig(int sig, struct task_struct *t);
83766+
83767+void
83768+gr_handle_crash(struct task_struct *task, const int sig)
83769+{
83770+ struct acl_subject_label *curr;
83771+ struct task_struct *tsk, *tsk2;
83772+ const struct cred *cred;
83773+ const struct cred *cred2;
83774+
83775+ if (sig != SIGSEGV && sig != SIGKILL && sig != SIGBUS && sig != SIGILL)
83776+ return;
83777+
83778+ if (unlikely(!gr_acl_is_enabled()))
83779+ return;
83780+
83781+ curr = task->acl;
83782+
83783+ if (!(curr->resmask & (1U << GR_CRASH_RES)))
83784+ return;
83785+
83786+ if (time_before_eq(curr->expires, get_seconds())) {
83787+ curr->expires = 0;
83788+ curr->crashes = 0;
83789+ }
83790+
83791+ curr->crashes++;
83792+
83793+ if (!curr->expires)
83794+ curr->expires = get_seconds() + curr->res[GR_CRASH_RES].rlim_max;
83795+
83796+ if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
83797+ time_after(curr->expires, get_seconds())) {
83798+ rcu_read_lock();
83799+ cred = __task_cred(task);
83800+ if (gr_is_global_nonroot(cred->uid) && proc_is_setxid(cred)) {
83801+ gr_log_crash1(GR_DONT_AUDIT, GR_SEGVSTART_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
83802+ spin_lock(&gr_uid_lock);
83803+ gr_insert_uid(cred->uid, curr->expires);
83804+ spin_unlock(&gr_uid_lock);
83805+ curr->expires = 0;
83806+ curr->crashes = 0;
83807+ read_lock(&tasklist_lock);
83808+ do_each_thread(tsk2, tsk) {
83809+ cred2 = __task_cred(tsk);
83810+ if (tsk != task && uid_eq(cred2->uid, cred->uid))
83811+ gr_fake_force_sig(SIGKILL, tsk);
83812+ } while_each_thread(tsk2, tsk);
83813+ read_unlock(&tasklist_lock);
83814+ } else {
83815+ gr_log_crash2(GR_DONT_AUDIT, GR_SEGVNOSUID_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
83816+ read_lock(&tasklist_lock);
83817+ read_lock(&grsec_exec_file_lock);
83818+ do_each_thread(tsk2, tsk) {
83819+ if (likely(tsk != task)) {
83820+ // if this thread has the same subject as the one that triggered
83821+ // RES_CRASH and it's the same binary, kill it
83822+ if (tsk->acl == task->acl && gr_is_same_file(tsk->exec_file, task->exec_file))
83823+ gr_fake_force_sig(SIGKILL, tsk);
83824+ }
83825+ } while_each_thread(tsk2, tsk);
83826+ read_unlock(&grsec_exec_file_lock);
83827+ read_unlock(&tasklist_lock);
83828+ }
83829+ rcu_read_unlock();
83830+ }
83831+
83832+ return;
83833+}
83834+
83835+int
83836+gr_check_crash_exec(const struct file *filp)
83837+{
83838+ struct acl_subject_label *curr;
83839+ struct dentry *dentry;
83840+
83841+ if (unlikely(!gr_acl_is_enabled()))
83842+ return 0;
83843+
83844+ read_lock(&gr_inode_lock);
83845+ dentry = filp->f_path.dentry;
83846+ curr = lookup_acl_subj_label(__get_ino(dentry), __get_dev(dentry),
83847+ current->role);
83848+ read_unlock(&gr_inode_lock);
83849+
83850+ if (!curr || !(curr->resmask & (1U << GR_CRASH_RES)) ||
83851+ (!curr->crashes && !curr->expires))
83852+ return 0;
83853+
83854+ if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
83855+ time_after(curr->expires, get_seconds()))
83856+ return 1;
83857+ else if (time_before_eq(curr->expires, get_seconds())) {
83858+ curr->crashes = 0;
83859+ curr->expires = 0;
83860+ }
83861+
83862+ return 0;
83863+}
83864+
83865+void
83866+gr_handle_alertkill(struct task_struct *task)
83867+{
83868+ struct acl_subject_label *curracl;
83869+ __u32 curr_ip;
83870+ struct task_struct *p, *p2;
83871+
83872+ if (unlikely(!gr_acl_is_enabled()))
83873+ return;
83874+
83875+ curracl = task->acl;
83876+ curr_ip = task->signal->curr_ip;
83877+
83878+ if ((curracl->mode & GR_KILLIPPROC) && curr_ip) {
83879+ read_lock(&tasklist_lock);
83880+ do_each_thread(p2, p) {
83881+ if (p->signal->curr_ip == curr_ip)
83882+ gr_fake_force_sig(SIGKILL, p);
83883+ } while_each_thread(p2, p);
83884+ read_unlock(&tasklist_lock);
83885+ } else if (curracl->mode & GR_KILLPROC)
83886+ gr_fake_force_sig(SIGKILL, task);
83887+
83888+ return;
83889+}
83890diff --git a/grsecurity/gracl_shm.c b/grsecurity/gracl_shm.c
83891new file mode 100644
83892index 0000000..6b0c9cc
83893--- /dev/null
83894+++ b/grsecurity/gracl_shm.c
83895@@ -0,0 +1,40 @@
83896+#include <linux/kernel.h>
83897+#include <linux/mm.h>
83898+#include <linux/sched.h>
83899+#include <linux/file.h>
83900+#include <linux/ipc.h>
83901+#include <linux/gracl.h>
83902+#include <linux/grsecurity.h>
83903+#include <linux/grinternal.h>
83904+
83905+int
83906+gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
83907+ const u64 shm_createtime, const kuid_t cuid, const int shmid)
83908+{
83909+ struct task_struct *task;
83910+
83911+ if (!gr_acl_is_enabled())
83912+ return 1;
83913+
83914+ rcu_read_lock();
83915+ read_lock(&tasklist_lock);
83916+
83917+ task = find_task_by_vpid(shm_cprid);
83918+
83919+ if (unlikely(!task))
83920+ task = find_task_by_vpid(shm_lapid);
83921+
83922+ if (unlikely(task && (time_before_eq64(task->start_time, shm_createtime) ||
83923+ (task_pid_nr(task) == shm_lapid)) &&
83924+ (task->acl->mode & GR_PROTSHM) &&
83925+ (task->acl != current->acl))) {
83926+ read_unlock(&tasklist_lock);
83927+ rcu_read_unlock();
83928+ gr_log_int3(GR_DONT_AUDIT, GR_SHMAT_ACL_MSG, GR_GLOBAL_UID(cuid), shm_cprid, shmid);
83929+ return 0;
83930+ }
83931+ read_unlock(&tasklist_lock);
83932+ rcu_read_unlock();
83933+
83934+ return 1;
83935+}
83936diff --git a/grsecurity/grsec_chdir.c b/grsecurity/grsec_chdir.c
83937new file mode 100644
83938index 0000000..bc0be01
83939--- /dev/null
83940+++ b/grsecurity/grsec_chdir.c
83941@@ -0,0 +1,19 @@
83942+#include <linux/kernel.h>
83943+#include <linux/sched.h>
83944+#include <linux/fs.h>
83945+#include <linux/file.h>
83946+#include <linux/grsecurity.h>
83947+#include <linux/grinternal.h>
83948+
83949+void
83950+gr_log_chdir(const struct dentry *dentry, const struct vfsmount *mnt)
83951+{
83952+#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
83953+ if ((grsec_enable_chdir && grsec_enable_group &&
83954+ in_group_p(grsec_audit_gid)) || (grsec_enable_chdir &&
83955+ !grsec_enable_group)) {
83956+ gr_log_fs_generic(GR_DO_AUDIT, GR_CHDIR_AUDIT_MSG, dentry, mnt);
83957+ }
83958+#endif
83959+ return;
83960+}
83961diff --git a/grsecurity/grsec_chroot.c b/grsecurity/grsec_chroot.c
83962new file mode 100644
83963index 0000000..114ea4f
83964--- /dev/null
83965+++ b/grsecurity/grsec_chroot.c
83966@@ -0,0 +1,467 @@
83967+#include <linux/kernel.h>
83968+#include <linux/module.h>
83969+#include <linux/sched.h>
83970+#include <linux/file.h>
83971+#include <linux/fs.h>
83972+#include <linux/mount.h>
83973+#include <linux/types.h>
83974+#include "../fs/mount.h"
83975+#include <linux/grsecurity.h>
83976+#include <linux/grinternal.h>
83977+
83978+#ifdef CONFIG_GRKERNSEC_CHROOT_INITRD
83979+int gr_init_ran;
83980+#endif
83981+
83982+void gr_inc_chroot_refcnts(struct dentry *dentry, struct vfsmount *mnt)
83983+{
83984+#ifdef CONFIG_GRKERNSEC_CHROOT_RENAME
83985+ struct dentry *tmpd = dentry;
83986+
83987+ read_seqlock_excl(&mount_lock);
83988+ write_seqlock(&rename_lock);
83989+
83990+ while (tmpd != mnt->mnt_root) {
83991+ atomic_inc(&tmpd->chroot_refcnt);
83992+ tmpd = tmpd->d_parent;
83993+ }
83994+ atomic_inc(&tmpd->chroot_refcnt);
83995+
83996+ write_sequnlock(&rename_lock);
83997+ read_sequnlock_excl(&mount_lock);
83998+#endif
83999+}
84000+
84001+void gr_dec_chroot_refcnts(struct dentry *dentry, struct vfsmount *mnt)
84002+{
84003+#ifdef CONFIG_GRKERNSEC_CHROOT_RENAME
84004+ struct dentry *tmpd = dentry;
84005+
84006+ read_seqlock_excl(&mount_lock);
84007+ write_seqlock(&rename_lock);
84008+
84009+ while (tmpd != mnt->mnt_root) {
84010+ atomic_dec(&tmpd->chroot_refcnt);
84011+ tmpd = tmpd->d_parent;
84012+ }
84013+ atomic_dec(&tmpd->chroot_refcnt);
84014+
84015+ write_sequnlock(&rename_lock);
84016+ read_sequnlock_excl(&mount_lock);
84017+#endif
84018+}
84019+
84020+#ifdef CONFIG_GRKERNSEC_CHROOT_RENAME
84021+static struct dentry *get_closest_chroot(struct dentry *dentry)
84022+{
84023+ write_seqlock(&rename_lock);
84024+ do {
84025+ if (atomic_read(&dentry->chroot_refcnt)) {
84026+ write_sequnlock(&rename_lock);
84027+ return dentry;
84028+ }
84029+ dentry = dentry->d_parent;
84030+ } while (!IS_ROOT(dentry));
84031+ write_sequnlock(&rename_lock);
84032+ return NULL;
84033+}
84034+#endif
84035+
84036+int gr_bad_chroot_rename(struct dentry *olddentry, struct vfsmount *oldmnt,
84037+ struct dentry *newdentry, struct vfsmount *newmnt)
84038+{
84039+#ifdef CONFIG_GRKERNSEC_CHROOT_RENAME
84040+ struct dentry *chroot;
84041+
84042+ if (unlikely(!grsec_enable_chroot_rename))
84043+ return 0;
84044+
84045+ if (likely(!proc_is_chrooted(current) && gr_is_global_root(current_uid())))
84046+ return 0;
84047+
84048+ chroot = get_closest_chroot(olddentry);
84049+
84050+ if (chroot == NULL)
84051+ return 0;
84052+
84053+ if (is_subdir(newdentry, chroot))
84054+ return 0;
84055+
84056+ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_RENAME_MSG, olddentry, oldmnt);
84057+
84058+ return 1;
84059+#else
84060+ return 0;
84061+#endif
84062+}
84063+
84064+void gr_set_chroot_entries(struct task_struct *task, const struct path *path)
84065+{
84066+#ifdef CONFIG_GRKERNSEC
84067+ if (task_pid_nr(task) > 1 && path->dentry != init_task.fs->root.dentry &&
84068+ path->dentry != task->nsproxy->mnt_ns->root->mnt.mnt_root
84069+#ifdef CONFIG_GRKERNSEC_CHROOT_INITRD
84070+ && gr_init_ran
84071+#endif
84072+ )
84073+ task->gr_is_chrooted = 1;
84074+ else {
84075+#ifdef CONFIG_GRKERNSEC_CHROOT_INITRD
84076+ if (task_pid_nr(task) == 1 && !gr_init_ran)
84077+ gr_init_ran = 1;
84078+#endif
84079+ task->gr_is_chrooted = 0;
84080+ }
84081+
84082+ task->gr_chroot_dentry = path->dentry;
84083+#endif
84084+ return;
84085+}
84086+
84087+void gr_clear_chroot_entries(struct task_struct *task)
84088+{
84089+#ifdef CONFIG_GRKERNSEC
84090+ task->gr_is_chrooted = 0;
84091+ task->gr_chroot_dentry = NULL;
84092+#endif
84093+ return;
84094+}
84095+
84096+int
84097+gr_handle_chroot_unix(const pid_t pid)
84098+{
84099+#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
84100+ struct task_struct *p;
84101+
84102+ if (unlikely(!grsec_enable_chroot_unix))
84103+ return 1;
84104+
84105+ if (likely(!proc_is_chrooted(current)))
84106+ return 1;
84107+
84108+ rcu_read_lock();
84109+ read_lock(&tasklist_lock);
84110+ p = find_task_by_vpid_unrestricted(pid);
84111+ if (unlikely(p && !have_same_root(current, p))) {
84112+ read_unlock(&tasklist_lock);
84113+ rcu_read_unlock();
84114+ gr_log_noargs(GR_DONT_AUDIT, GR_UNIX_CHROOT_MSG);
84115+ return 0;
84116+ }
84117+ read_unlock(&tasklist_lock);
84118+ rcu_read_unlock();
84119+#endif
84120+ return 1;
84121+}
84122+
84123+int
84124+gr_handle_chroot_nice(void)
84125+{
84126+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
84127+ if (grsec_enable_chroot_nice && proc_is_chrooted(current)) {
84128+ gr_log_noargs(GR_DONT_AUDIT, GR_NICE_CHROOT_MSG);
84129+ return -EPERM;
84130+ }
84131+#endif
84132+ return 0;
84133+}
84134+
84135+int
84136+gr_handle_chroot_setpriority(struct task_struct *p, const int niceval)
84137+{
84138+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
84139+ if (grsec_enable_chroot_nice && (niceval < task_nice(p))
84140+ && proc_is_chrooted(current)) {
84141+ gr_log_str_int(GR_DONT_AUDIT, GR_PRIORITY_CHROOT_MSG, p->comm, task_pid_nr(p));
84142+ return -EACCES;
84143+ }
84144+#endif
84145+ return 0;
84146+}
84147+
84148+int
84149+gr_handle_chroot_fowner(struct pid *pid, enum pid_type type)
84150+{
84151+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
84152+ struct task_struct *p;
84153+ int ret = 0;
84154+ if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || !pid)
84155+ return ret;
84156+
84157+ read_lock(&tasklist_lock);
84158+ do_each_pid_task(pid, type, p) {
84159+ if (!have_same_root(current, p)) {
84160+ ret = 1;
84161+ goto out;
84162+ }
84163+ } while_each_pid_task(pid, type, p);
84164+out:
84165+ read_unlock(&tasklist_lock);
84166+ return ret;
84167+#endif
84168+ return 0;
84169+}
84170+
84171+int
84172+gr_pid_is_chrooted(struct task_struct *p)
84173+{
84174+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
84175+ if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || p == NULL)
84176+ return 0;
84177+
84178+ if ((p->exit_state & (EXIT_ZOMBIE | EXIT_DEAD)) ||
84179+ !have_same_root(current, p)) {
84180+ return 1;
84181+ }
84182+#endif
84183+ return 0;
84184+}
84185+
84186+EXPORT_SYMBOL_GPL(gr_pid_is_chrooted);
84187+
84188+#if defined(CONFIG_GRKERNSEC_CHROOT_DOUBLE) || defined(CONFIG_GRKERNSEC_CHROOT_FCHDIR)
84189+int gr_is_outside_chroot(const struct dentry *u_dentry, const struct vfsmount *u_mnt)
84190+{
84191+ struct path path, currentroot;
84192+ int ret = 0;
84193+
84194+ path.dentry = (struct dentry *)u_dentry;
84195+ path.mnt = (struct vfsmount *)u_mnt;
84196+ get_fs_root(current->fs, &currentroot);
84197+ if (path_is_under(&path, &currentroot))
84198+ ret = 1;
84199+ path_put(&currentroot);
84200+
84201+ return ret;
84202+}
84203+#endif
84204+
84205+int
84206+gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt)
84207+{
84208+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
84209+ if (!grsec_enable_chroot_fchdir)
84210+ return 1;
84211+
84212+ if (!proc_is_chrooted(current))
84213+ return 1;
84214+ else if (!gr_is_outside_chroot(u_dentry, u_mnt)) {
84215+ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_FCHDIR_MSG, u_dentry, u_mnt);
84216+ return 0;
84217+ }
84218+#endif
84219+ return 1;
84220+}
84221+
84222+int
84223+gr_chroot_fhandle(void)
84224+{
84225+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
84226+ if (!grsec_enable_chroot_fchdir)
84227+ return 1;
84228+
84229+ if (!proc_is_chrooted(current))
84230+ return 1;
84231+ else {
84232+ gr_log_noargs(GR_DONT_AUDIT, GR_CHROOT_FHANDLE_MSG);
84233+ return 0;
84234+ }
84235+#endif
84236+ return 1;
84237+}
84238+
84239+int
84240+gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
84241+ const u64 shm_createtime)
84242+{
84243+#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
84244+ struct task_struct *p;
84245+
84246+ if (unlikely(!grsec_enable_chroot_shmat))
84247+ return 1;
84248+
84249+ if (likely(!proc_is_chrooted(current)))
84250+ return 1;
84251+
84252+ rcu_read_lock();
84253+ read_lock(&tasklist_lock);
84254+
84255+ if ((p = find_task_by_vpid_unrestricted(shm_cprid))) {
84256+ if (time_before_eq64(p->start_time, shm_createtime)) {
84257+ if (have_same_root(current, p)) {
84258+ goto allow;
84259+ } else {
84260+ read_unlock(&tasklist_lock);
84261+ rcu_read_unlock();
84262+ gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
84263+ return 0;
84264+ }
84265+ }
84266+ /* creator exited, pid reuse, fall through to next check */
84267+ }
84268+ if ((p = find_task_by_vpid_unrestricted(shm_lapid))) {
84269+ if (unlikely(!have_same_root(current, p))) {
84270+ read_unlock(&tasklist_lock);
84271+ rcu_read_unlock();
84272+ gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
84273+ return 0;
84274+ }
84275+ }
84276+
84277+allow:
84278+ read_unlock(&tasklist_lock);
84279+ rcu_read_unlock();
84280+#endif
84281+ return 1;
84282+}
84283+
84284+void
84285+gr_log_chroot_exec(const struct dentry *dentry, const struct vfsmount *mnt)
84286+{
84287+#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
84288+ if (grsec_enable_chroot_execlog && proc_is_chrooted(current))
84289+ gr_log_fs_generic(GR_DO_AUDIT, GR_EXEC_CHROOT_MSG, dentry, mnt);
84290+#endif
84291+ return;
84292+}
84293+
84294+int
84295+gr_handle_chroot_mknod(const struct dentry *dentry,
84296+ const struct vfsmount *mnt, const int mode)
84297+{
84298+#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
84299+ if (grsec_enable_chroot_mknod && !S_ISFIFO(mode) && !S_ISREG(mode) &&
84300+ proc_is_chrooted(current)) {
84301+ gr_log_fs_generic(GR_DONT_AUDIT, GR_MKNOD_CHROOT_MSG, dentry, mnt);
84302+ return -EPERM;
84303+ }
84304+#endif
84305+ return 0;
84306+}
84307+
84308+int
84309+gr_handle_chroot_mount(const struct dentry *dentry,
84310+ const struct vfsmount *mnt, const char *dev_name)
84311+{
84312+#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
84313+ if (grsec_enable_chroot_mount && proc_is_chrooted(current)) {
84314+ gr_log_str_fs(GR_DONT_AUDIT, GR_MOUNT_CHROOT_MSG, dev_name ? dev_name : "none", dentry, mnt);
84315+ return -EPERM;
84316+ }
84317+#endif
84318+ return 0;
84319+}
84320+
84321+int
84322+gr_handle_chroot_pivot(void)
84323+{
84324+#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
84325+ if (grsec_enable_chroot_pivot && proc_is_chrooted(current)) {
84326+ gr_log_noargs(GR_DONT_AUDIT, GR_PIVOT_CHROOT_MSG);
84327+ return -EPERM;
84328+ }
84329+#endif
84330+ return 0;
84331+}
84332+
84333+int
84334+gr_handle_chroot_chroot(const struct dentry *dentry, const struct vfsmount *mnt)
84335+{
84336+#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
84337+ if (grsec_enable_chroot_double && proc_is_chrooted(current) &&
84338+ !gr_is_outside_chroot(dentry, mnt)) {
84339+ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_CHROOT_MSG, dentry, mnt);
84340+ return -EPERM;
84341+ }
84342+#endif
84343+ return 0;
84344+}
84345+
84346+extern const char *captab_log[];
84347+extern int captab_log_entries;
84348+
84349+int
84350+gr_task_chroot_is_capable(const struct task_struct *task, const struct cred *cred, const int cap)
84351+{
84352+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
84353+ if (grsec_enable_chroot_caps && proc_is_chrooted(task)) {
84354+ kernel_cap_t chroot_caps = GR_CHROOT_CAPS;
84355+ if (cap_raised(chroot_caps, cap)) {
84356+ if (cap_raised(cred->cap_effective, cap) && cap < captab_log_entries) {
84357+ gr_log_cap(GR_DONT_AUDIT, GR_CAP_CHROOT_MSG, task, captab_log[cap]);
84358+ }
84359+ return 0;
84360+ }
84361+ }
84362+#endif
84363+ return 1;
84364+}
84365+
84366+int
84367+gr_chroot_is_capable(const int cap)
84368+{
84369+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
84370+ return gr_task_chroot_is_capable(current, current_cred(), cap);
84371+#endif
84372+ return 1;
84373+}
84374+
84375+int
84376+gr_task_chroot_is_capable_nolog(const struct task_struct *task, const int cap)
84377+{
84378+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
84379+ if (grsec_enable_chroot_caps && proc_is_chrooted(task)) {
84380+ kernel_cap_t chroot_caps = GR_CHROOT_CAPS;
84381+ if (cap_raised(chroot_caps, cap)) {
84382+ return 0;
84383+ }
84384+ }
84385+#endif
84386+ return 1;
84387+}
84388+
84389+int
84390+gr_chroot_is_capable_nolog(const int cap)
84391+{
84392+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
84393+ return gr_task_chroot_is_capable_nolog(current, cap);
84394+#endif
84395+ return 1;
84396+}
84397+
84398+int
84399+gr_handle_chroot_sysctl(const int op)
84400+{
84401+#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
84402+ if (grsec_enable_chroot_sysctl && (op & MAY_WRITE) &&
84403+ proc_is_chrooted(current))
84404+ return -EACCES;
84405+#endif
84406+ return 0;
84407+}
84408+
84409+void
84410+gr_handle_chroot_chdir(const struct path *path)
84411+{
84412+#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
84413+ if (grsec_enable_chroot_chdir)
84414+ set_fs_pwd(current->fs, path);
84415+#endif
84416+ return;
84417+}
84418+
84419+int
84420+gr_handle_chroot_chmod(const struct dentry *dentry,
84421+ const struct vfsmount *mnt, const int mode)
84422+{
84423+#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
84424+ /* allow chmod +s on directories, but not files */
84425+ if (grsec_enable_chroot_chmod && !S_ISDIR(dentry->d_inode->i_mode) &&
84426+ ((mode & S_ISUID) || ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))) &&
84427+ proc_is_chrooted(current)) {
84428+ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHMOD_CHROOT_MSG, dentry, mnt);
84429+ return -EPERM;
84430+ }
84431+#endif
84432+ return 0;
84433+}
84434diff --git a/grsecurity/grsec_disabled.c b/grsecurity/grsec_disabled.c
84435new file mode 100644
84436index 0000000..946f750
84437--- /dev/null
84438+++ b/grsecurity/grsec_disabled.c
84439@@ -0,0 +1,445 @@
84440+#include <linux/kernel.h>
84441+#include <linux/module.h>
84442+#include <linux/sched.h>
84443+#include <linux/file.h>
84444+#include <linux/fs.h>
84445+#include <linux/kdev_t.h>
84446+#include <linux/net.h>
84447+#include <linux/in.h>
84448+#include <linux/ip.h>
84449+#include <linux/skbuff.h>
84450+#include <linux/sysctl.h>
84451+
84452+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
84453+void
84454+pax_set_initial_flags(struct linux_binprm *bprm)
84455+{
84456+ return;
84457+}
84458+#endif
84459+
84460+#ifdef CONFIG_SYSCTL
84461+__u32
84462+gr_handle_sysctl(const struct ctl_table * table, const int op)
84463+{
84464+ return 0;
84465+}
84466+#endif
84467+
84468+#ifdef CONFIG_TASKSTATS
84469+int gr_is_taskstats_denied(int pid)
84470+{
84471+ return 0;
84472+}
84473+#endif
84474+
84475+int
84476+gr_acl_is_enabled(void)
84477+{
84478+ return 0;
84479+}
84480+
84481+int
84482+gr_learn_cap(const struct task_struct *task, const struct cred *cred, const int cap)
84483+{
84484+ return 0;
84485+}
84486+
84487+void
84488+gr_handle_proc_create(const struct dentry *dentry, const struct inode *inode)
84489+{
84490+ return;
84491+}
84492+
84493+int
84494+gr_handle_rawio(const struct inode *inode)
84495+{
84496+ return 0;
84497+}
84498+
84499+void
84500+gr_acl_handle_psacct(struct task_struct *task, const long code)
84501+{
84502+ return;
84503+}
84504+
84505+int
84506+gr_handle_ptrace(struct task_struct *task, const long request)
84507+{
84508+ return 0;
84509+}
84510+
84511+int
84512+gr_handle_proc_ptrace(struct task_struct *task)
84513+{
84514+ return 0;
84515+}
84516+
84517+int
84518+gr_set_acls(const int type)
84519+{
84520+ return 0;
84521+}
84522+
84523+int
84524+gr_check_hidden_task(const struct task_struct *tsk)
84525+{
84526+ return 0;
84527+}
84528+
84529+int
84530+gr_check_protected_task(const struct task_struct *task)
84531+{
84532+ return 0;
84533+}
84534+
84535+int
84536+gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
84537+{
84538+ return 0;
84539+}
84540+
84541+void
84542+gr_copy_label(struct task_struct *tsk)
84543+{
84544+ return;
84545+}
84546+
84547+void
84548+gr_set_pax_flags(struct task_struct *task)
84549+{
84550+ return;
84551+}
84552+
84553+int
84554+gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
84555+ const int unsafe_share)
84556+{
84557+ return 0;
84558+}
84559+
84560+void
84561+gr_handle_delete(const u64 ino, const dev_t dev)
84562+{
84563+ return;
84564+}
84565+
84566+void
84567+gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
84568+{
84569+ return;
84570+}
84571+
84572+void
84573+gr_handle_crash(struct task_struct *task, const int sig)
84574+{
84575+ return;
84576+}
84577+
84578+int
84579+gr_check_crash_exec(const struct file *filp)
84580+{
84581+ return 0;
84582+}
84583+
84584+int
84585+gr_check_crash_uid(const kuid_t uid)
84586+{
84587+ return 0;
84588+}
84589+
84590+void
84591+gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
84592+ struct dentry *old_dentry,
84593+ struct dentry *new_dentry,
84594+ struct vfsmount *mnt, const __u8 replace, unsigned int flags)
84595+{
84596+ return;
84597+}
84598+
84599+int
84600+gr_search_socket(const int family, const int type, const int protocol)
84601+{
84602+ return 1;
84603+}
84604+
84605+int
84606+gr_search_connectbind(const int mode, const struct socket *sock,
84607+ const struct sockaddr_in *addr)
84608+{
84609+ return 0;
84610+}
84611+
84612+void
84613+gr_handle_alertkill(struct task_struct *task)
84614+{
84615+ return;
84616+}
84617+
84618+__u32
84619+gr_acl_handle_execve(const struct dentry * dentry, const struct vfsmount * mnt)
84620+{
84621+ return 1;
84622+}
84623+
84624+__u32
84625+gr_acl_handle_hidden_file(const struct dentry * dentry,
84626+ const struct vfsmount * mnt)
84627+{
84628+ return 1;
84629+}
84630+
84631+__u32
84632+gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
84633+ int acc_mode)
84634+{
84635+ return 1;
84636+}
84637+
84638+__u32
84639+gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
84640+{
84641+ return 1;
84642+}
84643+
84644+__u32
84645+gr_acl_handle_unlink(const struct dentry * dentry, const struct vfsmount * mnt)
84646+{
84647+ return 1;
84648+}
84649+
84650+int
84651+gr_acl_handle_mmap(const struct file *file, const unsigned long prot,
84652+ unsigned int *vm_flags)
84653+{
84654+ return 1;
84655+}
84656+
84657+__u32
84658+gr_acl_handle_truncate(const struct dentry * dentry,
84659+ const struct vfsmount * mnt)
84660+{
84661+ return 1;
84662+}
84663+
84664+__u32
84665+gr_acl_handle_utime(const struct dentry * dentry, const struct vfsmount * mnt)
84666+{
84667+ return 1;
84668+}
84669+
84670+__u32
84671+gr_acl_handle_access(const struct dentry * dentry,
84672+ const struct vfsmount * mnt, const int fmode)
84673+{
84674+ return 1;
84675+}
84676+
84677+__u32
84678+gr_acl_handle_chmod(const struct dentry * dentry, const struct vfsmount * mnt,
84679+ umode_t *mode)
84680+{
84681+ return 1;
84682+}
84683+
84684+__u32
84685+gr_acl_handle_chown(const struct dentry * dentry, const struct vfsmount * mnt)
84686+{
84687+ return 1;
84688+}
84689+
84690+__u32
84691+gr_acl_handle_setxattr(const struct dentry * dentry, const struct vfsmount * mnt)
84692+{
84693+ return 1;
84694+}
84695+
84696+__u32
84697+gr_acl_handle_removexattr(const struct dentry * dentry, const struct vfsmount * mnt)
84698+{
84699+ return 1;
84700+}
84701+
84702+void
84703+grsecurity_init(void)
84704+{
84705+ return;
84706+}
84707+
84708+umode_t gr_acl_umask(void)
84709+{
84710+ return 0;
84711+}
84712+
84713+__u32
84714+gr_acl_handle_mknod(const struct dentry * new_dentry,
84715+ const struct dentry * parent_dentry,
84716+ const struct vfsmount * parent_mnt,
84717+ const int mode)
84718+{
84719+ return 1;
84720+}
84721+
84722+__u32
84723+gr_acl_handle_mkdir(const struct dentry * new_dentry,
84724+ const struct dentry * parent_dentry,
84725+ const struct vfsmount * parent_mnt)
84726+{
84727+ return 1;
84728+}
84729+
84730+__u32
84731+gr_acl_handle_symlink(const struct dentry * new_dentry,
84732+ const struct dentry * parent_dentry,
84733+ const struct vfsmount * parent_mnt, const struct filename *from)
84734+{
84735+ return 1;
84736+}
84737+
84738+__u32
84739+gr_acl_handle_link(const struct dentry * new_dentry,
84740+ const struct dentry * parent_dentry,
84741+ const struct vfsmount * parent_mnt,
84742+ const struct dentry * old_dentry,
84743+ const struct vfsmount * old_mnt, const struct filename *to)
84744+{
84745+ return 1;
84746+}
84747+
84748+int
84749+gr_acl_handle_rename(const struct dentry *new_dentry,
84750+ const struct dentry *parent_dentry,
84751+ const struct vfsmount *parent_mnt,
84752+ const struct dentry *old_dentry,
84753+ const struct inode *old_parent_inode,
84754+ const struct vfsmount *old_mnt, const struct filename *newname,
84755+ unsigned int flags)
84756+{
84757+ return 0;
84758+}
84759+
84760+int
84761+gr_acl_handle_filldir(const struct file *file, const char *name,
84762+ const int namelen, const u64 ino)
84763+{
84764+ return 1;
84765+}
84766+
84767+int
84768+gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
84769+ const u64 shm_createtime, const kuid_t cuid, const int shmid)
84770+{
84771+ return 1;
84772+}
84773+
84774+int
84775+gr_search_bind(const struct socket *sock, const struct sockaddr_in *addr)
84776+{
84777+ return 0;
84778+}
84779+
84780+int
84781+gr_search_accept(const struct socket *sock)
84782+{
84783+ return 0;
84784+}
84785+
84786+int
84787+gr_search_listen(const struct socket *sock)
84788+{
84789+ return 0;
84790+}
84791+
84792+int
84793+gr_search_connect(const struct socket *sock, const struct sockaddr_in *addr)
84794+{
84795+ return 0;
84796+}
84797+
84798+__u32
84799+gr_acl_handle_unix(const struct dentry * dentry, const struct vfsmount * mnt)
84800+{
84801+ return 1;
84802+}
84803+
84804+__u32
84805+gr_acl_handle_creat(const struct dentry * dentry,
84806+ const struct dentry * p_dentry,
84807+ const struct vfsmount * p_mnt, int open_flags, int acc_mode,
84808+ const int imode)
84809+{
84810+ return 1;
84811+}
84812+
84813+void
84814+gr_acl_handle_exit(void)
84815+{
84816+ return;
84817+}
84818+
84819+int
84820+gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
84821+{
84822+ return 1;
84823+}
84824+
84825+void
84826+gr_set_role_label(const kuid_t uid, const kgid_t gid)
84827+{
84828+ return;
84829+}
84830+
84831+int
84832+gr_acl_handle_procpidmem(const struct task_struct *task)
84833+{
84834+ return 0;
84835+}
84836+
84837+int
84838+gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb)
84839+{
84840+ return 0;
84841+}
84842+
84843+int
84844+gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr)
84845+{
84846+ return 0;
84847+}
84848+
84849+int
84850+gr_check_user_change(kuid_t real, kuid_t effective, kuid_t fs)
84851+{
84852+ return 0;
84853+}
84854+
84855+int
84856+gr_check_group_change(kgid_t real, kgid_t effective, kgid_t fs)
84857+{
84858+ return 0;
84859+}
84860+
84861+int gr_acl_enable_at_secure(void)
84862+{
84863+ return 0;
84864+}
84865+
84866+dev_t gr_get_dev_from_dentry(struct dentry *dentry)
84867+{
84868+ return dentry->d_sb->s_dev;
84869+}
84870+
84871+u64 gr_get_ino_from_dentry(struct dentry *dentry)
84872+{
84873+ return dentry->d_inode->i_ino;
84874+}
84875+
84876+void gr_put_exec_file(struct task_struct *task)
84877+{
84878+ return;
84879+}
84880+
84881+#ifdef CONFIG_SECURITY
84882+EXPORT_SYMBOL_GPL(gr_check_user_change);
84883+EXPORT_SYMBOL_GPL(gr_check_group_change);
84884+#endif
84885diff --git a/grsecurity/grsec_exec.c b/grsecurity/grsec_exec.c
84886new file mode 100644
84887index 0000000..fb7531e
84888--- /dev/null
84889+++ b/grsecurity/grsec_exec.c
84890@@ -0,0 +1,189 @@
84891+#include <linux/kernel.h>
84892+#include <linux/sched.h>
84893+#include <linux/file.h>
84894+#include <linux/binfmts.h>
84895+#include <linux/fs.h>
84896+#include <linux/types.h>
84897+#include <linux/grdefs.h>
84898+#include <linux/grsecurity.h>
84899+#include <linux/grinternal.h>
84900+#include <linux/capability.h>
84901+#include <linux/module.h>
84902+#include <linux/compat.h>
84903+
84904+#include <asm/uaccess.h>
84905+
84906+#ifdef CONFIG_GRKERNSEC_EXECLOG
84907+static char gr_exec_arg_buf[132];
84908+static DEFINE_MUTEX(gr_exec_arg_mutex);
84909+#endif
84910+
84911+struct user_arg_ptr {
84912+#ifdef CONFIG_COMPAT
84913+ bool is_compat;
84914+#endif
84915+ union {
84916+ const char __user *const __user *native;
84917+#ifdef CONFIG_COMPAT
84918+ const compat_uptr_t __user *compat;
84919+#endif
84920+ } ptr;
84921+};
84922+
84923+extern const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr);
84924+
84925+void
84926+gr_handle_exec_args(struct linux_binprm *bprm, struct user_arg_ptr argv)
84927+{
84928+#ifdef CONFIG_GRKERNSEC_EXECLOG
84929+ char *grarg = gr_exec_arg_buf;
84930+ unsigned int i, x, execlen = 0;
84931+ char c;
84932+
84933+ if (!((grsec_enable_execlog && grsec_enable_group &&
84934+ in_group_p(grsec_audit_gid))
84935+ || (grsec_enable_execlog && !grsec_enable_group)))
84936+ return;
84937+
84938+ mutex_lock(&gr_exec_arg_mutex);
84939+ memset(grarg, 0, sizeof(gr_exec_arg_buf));
84940+
84941+ for (i = 0; i < bprm->argc && execlen < 128; i++) {
84942+ const char __user *p;
84943+ unsigned int len;
84944+
84945+ p = get_user_arg_ptr(argv, i);
84946+ if (IS_ERR(p))
84947+ goto log;
84948+
84949+ len = strnlen_user(p, 128 - execlen);
84950+ if (len > 128 - execlen)
84951+ len = 128 - execlen;
84952+ else if (len > 0)
84953+ len--;
84954+ if (copy_from_user(grarg + execlen, p, len))
84955+ goto log;
84956+
84957+ /* rewrite unprintable characters */
84958+ for (x = 0; x < len; x++) {
84959+ c = *(grarg + execlen + x);
84960+ if (c < 32 || c > 126)
84961+ *(grarg + execlen + x) = ' ';
84962+ }
84963+
84964+ execlen += len;
84965+ *(grarg + execlen) = ' ';
84966+ *(grarg + execlen + 1) = '\0';
84967+ execlen++;
84968+ }
84969+
84970+ log:
84971+ gr_log_fs_str(GR_DO_AUDIT, GR_EXEC_AUDIT_MSG, bprm->file->f_path.dentry,
84972+ bprm->file->f_path.mnt, grarg);
84973+ mutex_unlock(&gr_exec_arg_mutex);
84974+#endif
84975+ return;
84976+}
84977+
84978+#ifdef CONFIG_GRKERNSEC
84979+extern int gr_acl_is_capable(const int cap);
84980+extern int gr_acl_is_capable_nolog(const int cap);
84981+extern int gr_task_acl_is_capable(const struct task_struct *task, const struct cred *cred, const int cap);
84982+extern int gr_task_acl_is_capable_nolog(const struct task_struct *task, const int cap);
84983+extern int gr_chroot_is_capable(const int cap);
84984+extern int gr_chroot_is_capable_nolog(const int cap);
84985+extern int gr_task_chroot_is_capable(const struct task_struct *task, const struct cred *cred, const int cap);
84986+extern int gr_task_chroot_is_capable_nolog(const struct task_struct *task, const int cap);
84987+#endif
84988+
84989+const char *captab_log[] = {
84990+ "CAP_CHOWN",
84991+ "CAP_DAC_OVERRIDE",
84992+ "CAP_DAC_READ_SEARCH",
84993+ "CAP_FOWNER",
84994+ "CAP_FSETID",
84995+ "CAP_KILL",
84996+ "CAP_SETGID",
84997+ "CAP_SETUID",
84998+ "CAP_SETPCAP",
84999+ "CAP_LINUX_IMMUTABLE",
85000+ "CAP_NET_BIND_SERVICE",
85001+ "CAP_NET_BROADCAST",
85002+ "CAP_NET_ADMIN",
85003+ "CAP_NET_RAW",
85004+ "CAP_IPC_LOCK",
85005+ "CAP_IPC_OWNER",
85006+ "CAP_SYS_MODULE",
85007+ "CAP_SYS_RAWIO",
85008+ "CAP_SYS_CHROOT",
85009+ "CAP_SYS_PTRACE",
85010+ "CAP_SYS_PACCT",
85011+ "CAP_SYS_ADMIN",
85012+ "CAP_SYS_BOOT",
85013+ "CAP_SYS_NICE",
85014+ "CAP_SYS_RESOURCE",
85015+ "CAP_SYS_TIME",
85016+ "CAP_SYS_TTY_CONFIG",
85017+ "CAP_MKNOD",
85018+ "CAP_LEASE",
85019+ "CAP_AUDIT_WRITE",
85020+ "CAP_AUDIT_CONTROL",
85021+ "CAP_SETFCAP",
85022+ "CAP_MAC_OVERRIDE",
85023+ "CAP_MAC_ADMIN",
85024+ "CAP_SYSLOG",
85025+ "CAP_WAKE_ALARM",
85026+ "CAP_BLOCK_SUSPEND",
85027+ "CAP_AUDIT_READ"
85028+};
85029+
85030+int captab_log_entries = sizeof(captab_log)/sizeof(captab_log[0]);
85031+
85032+int gr_is_capable(const int cap)
85033+{
85034+#ifdef CONFIG_GRKERNSEC
85035+ if (gr_acl_is_capable(cap) && gr_chroot_is_capable(cap))
85036+ return 1;
85037+ return 0;
85038+#else
85039+ return 1;
85040+#endif
85041+}
85042+
85043+int gr_task_is_capable(const struct task_struct *task, const struct cred *cred, const int cap)
85044+{
85045+#ifdef CONFIG_GRKERNSEC
85046+ if (gr_task_acl_is_capable(task, cred, cap) && gr_task_chroot_is_capable(task, cred, cap))
85047+ return 1;
85048+ return 0;
85049+#else
85050+ return 1;
85051+#endif
85052+}
85053+
85054+int gr_is_capable_nolog(const int cap)
85055+{
85056+#ifdef CONFIG_GRKERNSEC
85057+ if (gr_acl_is_capable_nolog(cap) && gr_chroot_is_capable_nolog(cap))
85058+ return 1;
85059+ return 0;
85060+#else
85061+ return 1;
85062+#endif
85063+}
85064+
85065+int gr_task_is_capable_nolog(const struct task_struct *task, const int cap)
85066+{
85067+#ifdef CONFIG_GRKERNSEC
85068+ if (gr_task_acl_is_capable_nolog(task, cap) && gr_task_chroot_is_capable_nolog(task, cap))
85069+ return 1;
85070+ return 0;
85071+#else
85072+ return 1;
85073+#endif
85074+}
85075+
85076+EXPORT_SYMBOL_GPL(gr_is_capable);
85077+EXPORT_SYMBOL_GPL(gr_is_capable_nolog);
85078+EXPORT_SYMBOL_GPL(gr_task_is_capable);
85079+EXPORT_SYMBOL_GPL(gr_task_is_capable_nolog);
85080diff --git a/grsecurity/grsec_fifo.c b/grsecurity/grsec_fifo.c
85081new file mode 100644
85082index 0000000..06cc6ea
85083--- /dev/null
85084+++ b/grsecurity/grsec_fifo.c
85085@@ -0,0 +1,24 @@
85086+#include <linux/kernel.h>
85087+#include <linux/sched.h>
85088+#include <linux/fs.h>
85089+#include <linux/file.h>
85090+#include <linux/grinternal.h>
85091+
85092+int
85093+gr_handle_fifo(const struct dentry *dentry, const struct vfsmount *mnt,
85094+ const struct dentry *dir, const int flag, const int acc_mode)
85095+{
85096+#ifdef CONFIG_GRKERNSEC_FIFO
85097+ const struct cred *cred = current_cred();
85098+
85099+ if (grsec_enable_fifo && S_ISFIFO(dentry->d_inode->i_mode) &&
85100+ !(flag & O_EXCL) && (dir->d_inode->i_mode & S_ISVTX) &&
85101+ !uid_eq(dentry->d_inode->i_uid, dir->d_inode->i_uid) &&
85102+ !uid_eq(cred->fsuid, dentry->d_inode->i_uid)) {
85103+ if (!inode_permission(dentry->d_inode, acc_mode))
85104+ gr_log_fs_int2(GR_DONT_AUDIT, GR_FIFO_MSG, dentry, mnt, GR_GLOBAL_UID(dentry->d_inode->i_uid), GR_GLOBAL_GID(dentry->d_inode->i_gid));
85105+ return -EACCES;
85106+ }
85107+#endif
85108+ return 0;
85109+}
85110diff --git a/grsecurity/grsec_fork.c b/grsecurity/grsec_fork.c
85111new file mode 100644
85112index 0000000..8ca18bf
85113--- /dev/null
85114+++ b/grsecurity/grsec_fork.c
85115@@ -0,0 +1,23 @@
85116+#include <linux/kernel.h>
85117+#include <linux/sched.h>
85118+#include <linux/grsecurity.h>
85119+#include <linux/grinternal.h>
85120+#include <linux/errno.h>
85121+
85122+void
85123+gr_log_forkfail(const int retval)
85124+{
85125+#ifdef CONFIG_GRKERNSEC_FORKFAIL
85126+ if (grsec_enable_forkfail && (retval == -EAGAIN || retval == -ENOMEM)) {
85127+ switch (retval) {
85128+ case -EAGAIN:
85129+ gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "EAGAIN");
85130+ break;
85131+ case -ENOMEM:
85132+ gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "ENOMEM");
85133+ break;
85134+ }
85135+ }
85136+#endif
85137+ return;
85138+}
85139diff --git a/grsecurity/grsec_init.c b/grsecurity/grsec_init.c
85140new file mode 100644
85141index 0000000..4ed9e7d
85142--- /dev/null
85143+++ b/grsecurity/grsec_init.c
85144@@ -0,0 +1,290 @@
85145+#include <linux/kernel.h>
85146+#include <linux/sched.h>
85147+#include <linux/mm.h>
85148+#include <linux/gracl.h>
85149+#include <linux/slab.h>
85150+#include <linux/vmalloc.h>
85151+#include <linux/percpu.h>
85152+#include <linux/module.h>
85153+
85154+int grsec_enable_ptrace_readexec;
85155+int grsec_enable_setxid;
85156+int grsec_enable_symlinkown;
85157+kgid_t grsec_symlinkown_gid;
85158+int grsec_enable_brute;
85159+int grsec_enable_link;
85160+int grsec_enable_dmesg;
85161+int grsec_enable_harden_ptrace;
85162+int grsec_enable_harden_ipc;
85163+int grsec_enable_fifo;
85164+int grsec_enable_execlog;
85165+int grsec_enable_signal;
85166+int grsec_enable_forkfail;
85167+int grsec_enable_audit_ptrace;
85168+int grsec_enable_time;
85169+int grsec_enable_group;
85170+kgid_t grsec_audit_gid;
85171+int grsec_enable_chdir;
85172+int grsec_enable_mount;
85173+int grsec_enable_rofs;
85174+int grsec_deny_new_usb;
85175+int grsec_enable_chroot_findtask;
85176+int grsec_enable_chroot_mount;
85177+int grsec_enable_chroot_shmat;
85178+int grsec_enable_chroot_fchdir;
85179+int grsec_enable_chroot_double;
85180+int grsec_enable_chroot_pivot;
85181+int grsec_enable_chroot_chdir;
85182+int grsec_enable_chroot_chmod;
85183+int grsec_enable_chroot_mknod;
85184+int grsec_enable_chroot_nice;
85185+int grsec_enable_chroot_execlog;
85186+int grsec_enable_chroot_caps;
85187+int grsec_enable_chroot_rename;
85188+int grsec_enable_chroot_sysctl;
85189+int grsec_enable_chroot_unix;
85190+int grsec_enable_tpe;
85191+kgid_t grsec_tpe_gid;
85192+int grsec_enable_blackhole;
85193+#ifdef CONFIG_IPV6_MODULE
85194+EXPORT_SYMBOL_GPL(grsec_enable_blackhole);
85195+#endif
85196+int grsec_lastack_retries;
85197+int grsec_enable_tpe_all;
85198+int grsec_enable_tpe_invert;
85199+int grsec_enable_socket_all;
85200+kgid_t grsec_socket_all_gid;
85201+int grsec_enable_socket_client;
85202+kgid_t grsec_socket_client_gid;
85203+int grsec_enable_socket_server;
85204+kgid_t grsec_socket_server_gid;
85205+int grsec_resource_logging;
85206+int grsec_disable_privio;
85207+int grsec_enable_log_rwxmaps;
85208+int grsec_lock;
85209+
85210+DEFINE_SPINLOCK(grsec_alert_lock);
85211+unsigned long grsec_alert_wtime = 0;
85212+unsigned long grsec_alert_fyet = 0;
85213+
85214+DEFINE_SPINLOCK(grsec_audit_lock);
85215+
85216+DEFINE_RWLOCK(grsec_exec_file_lock);
85217+
85218+char *gr_shared_page[4];
85219+
85220+char *gr_alert_log_fmt;
85221+char *gr_audit_log_fmt;
85222+char *gr_alert_log_buf;
85223+char *gr_audit_log_buf;
85224+
85225+extern struct gr_arg *gr_usermode;
85226+extern unsigned char *gr_system_salt;
85227+extern unsigned char *gr_system_sum;
85228+
85229+void __init
85230+grsecurity_init(void)
85231+{
85232+ int j;
85233+ /* create the per-cpu shared pages */
85234+
85235+#ifdef CONFIG_X86
85236+ memset((char *)(0x41a + PAGE_OFFSET), 0, 36);
85237+#endif
85238+
85239+ for (j = 0; j < 4; j++) {
85240+ gr_shared_page[j] = (char *)__alloc_percpu(PAGE_SIZE, __alignof__(unsigned long long));
85241+ if (gr_shared_page[j] == NULL) {
85242+ panic("Unable to allocate grsecurity shared page");
85243+ return;
85244+ }
85245+ }
85246+
85247+ /* allocate log buffers */
85248+ gr_alert_log_fmt = kmalloc(512, GFP_KERNEL);
85249+ if (!gr_alert_log_fmt) {
85250+ panic("Unable to allocate grsecurity alert log format buffer");
85251+ return;
85252+ }
85253+ gr_audit_log_fmt = kmalloc(512, GFP_KERNEL);
85254+ if (!gr_audit_log_fmt) {
85255+ panic("Unable to allocate grsecurity audit log format buffer");
85256+ return;
85257+ }
85258+ gr_alert_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
85259+ if (!gr_alert_log_buf) {
85260+ panic("Unable to allocate grsecurity alert log buffer");
85261+ return;
85262+ }
85263+ gr_audit_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
85264+ if (!gr_audit_log_buf) {
85265+ panic("Unable to allocate grsecurity audit log buffer");
85266+ return;
85267+ }
85268+
85269+ /* allocate memory for authentication structure */
85270+ gr_usermode = kmalloc(sizeof(struct gr_arg), GFP_KERNEL);
85271+ gr_system_salt = kmalloc(GR_SALT_LEN, GFP_KERNEL);
85272+ gr_system_sum = kmalloc(GR_SHA_LEN, GFP_KERNEL);
85273+
85274+ if (!gr_usermode || !gr_system_salt || !gr_system_sum) {
85275+ panic("Unable to allocate grsecurity authentication structure");
85276+ return;
85277+ }
85278+
85279+#ifdef CONFIG_GRKERNSEC_IO
85280+#if !defined(CONFIG_GRKERNSEC_SYSCTL_DISTRO)
85281+ grsec_disable_privio = 1;
85282+#elif defined(CONFIG_GRKERNSEC_SYSCTL_ON)
85283+ grsec_disable_privio = 1;
85284+#else
85285+ grsec_disable_privio = 0;
85286+#endif
85287+#endif
85288+
85289+#ifdef CONFIG_GRKERNSEC_TPE_INVERT
85290+ /* for backward compatibility, tpe_invert always defaults to on if
85291+ enabled in the kernel
85292+ */
85293+ grsec_enable_tpe_invert = 1;
85294+#endif
85295+
85296+#if !defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_SYSCTL_ON)
85297+#ifndef CONFIG_GRKERNSEC_SYSCTL
85298+ grsec_lock = 1;
85299+#endif
85300+
85301+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
85302+ grsec_enable_log_rwxmaps = 1;
85303+#endif
85304+#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
85305+ grsec_enable_group = 1;
85306+ grsec_audit_gid = KGIDT_INIT(CONFIG_GRKERNSEC_AUDIT_GID);
85307+#endif
85308+#ifdef CONFIG_GRKERNSEC_PTRACE_READEXEC
85309+ grsec_enable_ptrace_readexec = 1;
85310+#endif
85311+#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
85312+ grsec_enable_chdir = 1;
85313+#endif
85314+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
85315+ grsec_enable_harden_ptrace = 1;
85316+#endif
85317+#ifdef CONFIG_GRKERNSEC_HARDEN_IPC
85318+ grsec_enable_harden_ipc = 1;
85319+#endif
85320+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
85321+ grsec_enable_mount = 1;
85322+#endif
85323+#ifdef CONFIG_GRKERNSEC_LINK
85324+ grsec_enable_link = 1;
85325+#endif
85326+#ifdef CONFIG_GRKERNSEC_BRUTE
85327+ grsec_enable_brute = 1;
85328+#endif
85329+#ifdef CONFIG_GRKERNSEC_DMESG
85330+ grsec_enable_dmesg = 1;
85331+#endif
85332+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
85333+ grsec_enable_blackhole = 1;
85334+ grsec_lastack_retries = 4;
85335+#endif
85336+#ifdef CONFIG_GRKERNSEC_FIFO
85337+ grsec_enable_fifo = 1;
85338+#endif
85339+#ifdef CONFIG_GRKERNSEC_EXECLOG
85340+ grsec_enable_execlog = 1;
85341+#endif
85342+#ifdef CONFIG_GRKERNSEC_SETXID
85343+ grsec_enable_setxid = 1;
85344+#endif
85345+#ifdef CONFIG_GRKERNSEC_SIGNAL
85346+ grsec_enable_signal = 1;
85347+#endif
85348+#ifdef CONFIG_GRKERNSEC_FORKFAIL
85349+ grsec_enable_forkfail = 1;
85350+#endif
85351+#ifdef CONFIG_GRKERNSEC_TIME
85352+ grsec_enable_time = 1;
85353+#endif
85354+#ifdef CONFIG_GRKERNSEC_RESLOG
85355+ grsec_resource_logging = 1;
85356+#endif
85357+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
85358+ grsec_enable_chroot_findtask = 1;
85359+#endif
85360+#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
85361+ grsec_enable_chroot_unix = 1;
85362+#endif
85363+#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
85364+ grsec_enable_chroot_mount = 1;
85365+#endif
85366+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
85367+ grsec_enable_chroot_fchdir = 1;
85368+#endif
85369+#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
85370+ grsec_enable_chroot_shmat = 1;
85371+#endif
85372+#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
85373+ grsec_enable_audit_ptrace = 1;
85374+#endif
85375+#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
85376+ grsec_enable_chroot_double = 1;
85377+#endif
85378+#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
85379+ grsec_enable_chroot_pivot = 1;
85380+#endif
85381+#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
85382+ grsec_enable_chroot_chdir = 1;
85383+#endif
85384+#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
85385+ grsec_enable_chroot_chmod = 1;
85386+#endif
85387+#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
85388+ grsec_enable_chroot_mknod = 1;
85389+#endif
85390+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
85391+ grsec_enable_chroot_nice = 1;
85392+#endif
85393+#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
85394+ grsec_enable_chroot_execlog = 1;
85395+#endif
85396+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
85397+ grsec_enable_chroot_caps = 1;
85398+#endif
85399+#ifdef CONFIG_GRKERNSEC_CHROOT_RENAME
85400+ grsec_enable_chroot_rename = 1;
85401+#endif
85402+#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
85403+ grsec_enable_chroot_sysctl = 1;
85404+#endif
85405+#ifdef CONFIG_GRKERNSEC_SYMLINKOWN
85406+ grsec_enable_symlinkown = 1;
85407+ grsec_symlinkown_gid = KGIDT_INIT(CONFIG_GRKERNSEC_SYMLINKOWN_GID);
85408+#endif
85409+#ifdef CONFIG_GRKERNSEC_TPE
85410+ grsec_enable_tpe = 1;
85411+ grsec_tpe_gid = KGIDT_INIT(CONFIG_GRKERNSEC_TPE_GID);
85412+#ifdef CONFIG_GRKERNSEC_TPE_ALL
85413+ grsec_enable_tpe_all = 1;
85414+#endif
85415+#endif
85416+#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
85417+ grsec_enable_socket_all = 1;
85418+ grsec_socket_all_gid = KGIDT_INIT(CONFIG_GRKERNSEC_SOCKET_ALL_GID);
85419+#endif
85420+#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
85421+ grsec_enable_socket_client = 1;
85422+ grsec_socket_client_gid = KGIDT_INIT(CONFIG_GRKERNSEC_SOCKET_CLIENT_GID);
85423+#endif
85424+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
85425+ grsec_enable_socket_server = 1;
85426+ grsec_socket_server_gid = KGIDT_INIT(CONFIG_GRKERNSEC_SOCKET_SERVER_GID);
85427+#endif
85428+#endif
85429+#ifdef CONFIG_GRKERNSEC_DENYUSB_FORCE
85430+ grsec_deny_new_usb = 1;
85431+#endif
85432+
85433+ return;
85434+}
85435diff --git a/grsecurity/grsec_ipc.c b/grsecurity/grsec_ipc.c
85436new file mode 100644
85437index 0000000..1773300
85438--- /dev/null
85439+++ b/grsecurity/grsec_ipc.c
85440@@ -0,0 +1,48 @@
85441+#include <linux/kernel.h>
85442+#include <linux/mm.h>
85443+#include <linux/sched.h>
85444+#include <linux/file.h>
85445+#include <linux/ipc.h>
85446+#include <linux/ipc_namespace.h>
85447+#include <linux/grsecurity.h>
85448+#include <linux/grinternal.h>
85449+
85450+int
85451+gr_ipc_permitted(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp, int requested_mode, int granted_mode)
85452+{
85453+#ifdef CONFIG_GRKERNSEC_HARDEN_IPC
85454+ int write;
85455+ int orig_granted_mode;
85456+ kuid_t euid;
85457+ kgid_t egid;
85458+
85459+ if (!grsec_enable_harden_ipc)
85460+ return 1;
85461+
85462+ euid = current_euid();
85463+ egid = current_egid();
85464+
85465+ write = requested_mode & 00002;
85466+ orig_granted_mode = ipcp->mode;
85467+
85468+ if (uid_eq(euid, ipcp->cuid) || uid_eq(euid, ipcp->uid))
85469+ orig_granted_mode >>= 6;
85470+ else {
85471+ /* if likely wrong permissions, lock to user */
85472+ if (orig_granted_mode & 0007)
85473+ orig_granted_mode = 0;
85474+ /* otherwise do a egid-only check */
85475+ else if (gid_eq(egid, ipcp->cgid) || gid_eq(egid, ipcp->gid))
85476+ orig_granted_mode >>= 3;
85477+ /* otherwise, no access */
85478+ else
85479+ orig_granted_mode = 0;
85480+ }
85481+ if (!(requested_mode & ~granted_mode & 0007) && (requested_mode & ~orig_granted_mode & 0007) &&
85482+ !ns_capable_nolog(ns->user_ns, CAP_IPC_OWNER)) {
85483+ gr_log_str_int(GR_DONT_AUDIT, GR_IPC_DENIED_MSG, write ? "write" : "read", GR_GLOBAL_UID(ipcp->cuid));
85484+ return 0;
85485+ }
85486+#endif
85487+ return 1;
85488+}
85489diff --git a/grsecurity/grsec_link.c b/grsecurity/grsec_link.c
85490new file mode 100644
85491index 0000000..5e05e20
85492--- /dev/null
85493+++ b/grsecurity/grsec_link.c
85494@@ -0,0 +1,58 @@
85495+#include <linux/kernel.h>
85496+#include <linux/sched.h>
85497+#include <linux/fs.h>
85498+#include <linux/file.h>
85499+#include <linux/grinternal.h>
85500+
85501+int gr_handle_symlink_owner(const struct path *link, const struct inode *target)
85502+{
85503+#ifdef CONFIG_GRKERNSEC_SYMLINKOWN
85504+ const struct inode *link_inode = link->dentry->d_inode;
85505+
85506+ if (grsec_enable_symlinkown && in_group_p(grsec_symlinkown_gid) &&
85507+ /* ignore root-owned links, e.g. /proc/self */
85508+ gr_is_global_nonroot(link_inode->i_uid) && target &&
85509+ !uid_eq(link_inode->i_uid, target->i_uid)) {
85510+ gr_log_fs_int2(GR_DONT_AUDIT, GR_SYMLINKOWNER_MSG, link->dentry, link->mnt, link_inode->i_uid, target->i_uid);
85511+ return 1;
85512+ }
85513+#endif
85514+ return 0;
85515+}
85516+
85517+int
85518+gr_handle_follow_link(const struct inode *parent,
85519+ const struct inode *inode,
85520+ const struct dentry *dentry, const struct vfsmount *mnt)
85521+{
85522+#ifdef CONFIG_GRKERNSEC_LINK
85523+ const struct cred *cred = current_cred();
85524+
85525+ if (grsec_enable_link && S_ISLNK(inode->i_mode) &&
85526+ (parent->i_mode & S_ISVTX) && !uid_eq(parent->i_uid, inode->i_uid) &&
85527+ (parent->i_mode & S_IWOTH) && !uid_eq(cred->fsuid, inode->i_uid)) {
85528+ gr_log_fs_int2(GR_DONT_AUDIT, GR_SYMLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid);
85529+ return -EACCES;
85530+ }
85531+#endif
85532+ return 0;
85533+}
85534+
85535+int
85536+gr_handle_hardlink(const struct dentry *dentry,
85537+ const struct vfsmount *mnt,
85538+ struct inode *inode, const int mode, const struct filename *to)
85539+{
85540+#ifdef CONFIG_GRKERNSEC_LINK
85541+ const struct cred *cred = current_cred();
85542+
85543+ if (grsec_enable_link && !uid_eq(cred->fsuid, inode->i_uid) &&
85544+ (!S_ISREG(mode) || is_privileged_binary(dentry) ||
85545+ (inode_permission(inode, MAY_READ | MAY_WRITE))) &&
85546+ !capable(CAP_FOWNER) && gr_is_global_nonroot(cred->uid)) {
85547+ gr_log_fs_int2_str(GR_DONT_AUDIT, GR_HARDLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid, to->name);
85548+ return -EPERM;
85549+ }
85550+#endif
85551+ return 0;
85552+}
85553diff --git a/grsecurity/grsec_log.c b/grsecurity/grsec_log.c
85554new file mode 100644
85555index 0000000..dbe0a6b
85556--- /dev/null
85557+++ b/grsecurity/grsec_log.c
85558@@ -0,0 +1,341 @@
85559+#include <linux/kernel.h>
85560+#include <linux/sched.h>
85561+#include <linux/file.h>
85562+#include <linux/tty.h>
85563+#include <linux/fs.h>
85564+#include <linux/mm.h>
85565+#include <linux/grinternal.h>
85566+
85567+#ifdef CONFIG_TREE_PREEMPT_RCU
85568+#define DISABLE_PREEMPT() preempt_disable()
85569+#define ENABLE_PREEMPT() preempt_enable()
85570+#else
85571+#define DISABLE_PREEMPT()
85572+#define ENABLE_PREEMPT()
85573+#endif
85574+
85575+#define BEGIN_LOCKS(x) \
85576+ DISABLE_PREEMPT(); \
85577+ rcu_read_lock(); \
85578+ read_lock(&tasklist_lock); \
85579+ read_lock(&grsec_exec_file_lock); \
85580+ if (x != GR_DO_AUDIT) \
85581+ spin_lock(&grsec_alert_lock); \
85582+ else \
85583+ spin_lock(&grsec_audit_lock)
85584+
85585+#define END_LOCKS(x) \
85586+ if (x != GR_DO_AUDIT) \
85587+ spin_unlock(&grsec_alert_lock); \
85588+ else \
85589+ spin_unlock(&grsec_audit_lock); \
85590+ read_unlock(&grsec_exec_file_lock); \
85591+ read_unlock(&tasklist_lock); \
85592+ rcu_read_unlock(); \
85593+ ENABLE_PREEMPT(); \
85594+ if (x == GR_DONT_AUDIT) \
85595+ gr_handle_alertkill(current)
85596+
85597+enum {
85598+ FLOODING,
85599+ NO_FLOODING
85600+};
85601+
85602+extern char *gr_alert_log_fmt;
85603+extern char *gr_audit_log_fmt;
85604+extern char *gr_alert_log_buf;
85605+extern char *gr_audit_log_buf;
85606+
85607+static int gr_log_start(int audit)
85608+{
85609+ char *loglevel = (audit == GR_DO_AUDIT) ? KERN_INFO : KERN_ALERT;
85610+ char *fmt = (audit == GR_DO_AUDIT) ? gr_audit_log_fmt : gr_alert_log_fmt;
85611+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
85612+#if (CONFIG_GRKERNSEC_FLOODTIME > 0 && CONFIG_GRKERNSEC_FLOODBURST > 0)
85613+ unsigned long curr_secs = get_seconds();
85614+
85615+ if (audit == GR_DO_AUDIT)
85616+ goto set_fmt;
85617+
85618+ if (!grsec_alert_wtime || time_after(curr_secs, grsec_alert_wtime + CONFIG_GRKERNSEC_FLOODTIME)) {
85619+ grsec_alert_wtime = curr_secs;
85620+ grsec_alert_fyet = 0;
85621+ } else if (time_before_eq(curr_secs, grsec_alert_wtime + CONFIG_GRKERNSEC_FLOODTIME)
85622+ && (grsec_alert_fyet < CONFIG_GRKERNSEC_FLOODBURST)) {
85623+ grsec_alert_fyet++;
85624+ } else if (grsec_alert_fyet == CONFIG_GRKERNSEC_FLOODBURST) {
85625+ grsec_alert_wtime = curr_secs;
85626+ grsec_alert_fyet++;
85627+ printk(KERN_ALERT "grsec: more alerts, logging disabled for %d seconds\n", CONFIG_GRKERNSEC_FLOODTIME);
85628+ return FLOODING;
85629+ }
85630+ else return FLOODING;
85631+
85632+set_fmt:
85633+#endif
85634+ memset(buf, 0, PAGE_SIZE);
85635+ if (current->signal->curr_ip && gr_acl_is_enabled()) {
85636+ sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: (%.64s:%c:%.950s) ");
85637+ snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
85638+ } else if (current->signal->curr_ip) {
85639+ sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: ");
85640+ snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip);
85641+ } else if (gr_acl_is_enabled()) {
85642+ sprintf(fmt, "%s%s", loglevel, "grsec: (%.64s:%c:%.950s) ");
85643+ snprintf(buf, PAGE_SIZE - 1, fmt, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
85644+ } else {
85645+ sprintf(fmt, "%s%s", loglevel, "grsec: ");
85646+ strcpy(buf, fmt);
85647+ }
85648+
85649+ return NO_FLOODING;
85650+}
85651+
85652+static void gr_log_middle(int audit, const char *msg, va_list ap)
85653+ __attribute__ ((format (printf, 2, 0)));
85654+
85655+static void gr_log_middle(int audit, const char *msg, va_list ap)
85656+{
85657+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
85658+ unsigned int len = strlen(buf);
85659+
85660+ vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
85661+
85662+ return;
85663+}
85664+
85665+static void gr_log_middle_varargs(int audit, const char *msg, ...)
85666+ __attribute__ ((format (printf, 2, 3)));
85667+
85668+static void gr_log_middle_varargs(int audit, const char *msg, ...)
85669+{
85670+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
85671+ unsigned int len = strlen(buf);
85672+ va_list ap;
85673+
85674+ va_start(ap, msg);
85675+ vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
85676+ va_end(ap);
85677+
85678+ return;
85679+}
85680+
85681+static void gr_log_end(int audit, int append_default)
85682+{
85683+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
85684+ if (append_default) {
85685+ struct task_struct *task = current;
85686+ struct task_struct *parent = task->real_parent;
85687+ const struct cred *cred = __task_cred(task);
85688+ const struct cred *pcred = __task_cred(parent);
85689+ unsigned int len = strlen(buf);
85690+
85691+ snprintf(buf + len, PAGE_SIZE - len - 1, DEFAULTSECMSG, gr_task_fullpath(task), task->comm, task_pid_nr(task), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), gr_parent_task_fullpath(task), parent->comm, task_pid_nr(task->real_parent), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid));
85692+ }
85693+
85694+ printk("%s\n", buf);
85695+
85696+ return;
85697+}
85698+
85699+void gr_log_varargs(int audit, const char *msg, int argtypes, ...)
85700+{
85701+ int logtype;
85702+ char *result = (audit == GR_DO_AUDIT) ? "successful" : "denied";
85703+ char *str1 = NULL, *str2 = NULL, *str3 = NULL;
85704+ void *voidptr = NULL;
85705+ int num1 = 0, num2 = 0;
85706+ unsigned long ulong1 = 0, ulong2 = 0;
85707+ struct dentry *dentry = NULL;
85708+ struct vfsmount *mnt = NULL;
85709+ struct file *file = NULL;
85710+ struct task_struct *task = NULL;
85711+ struct vm_area_struct *vma = NULL;
85712+ const struct cred *cred, *pcred;
85713+ va_list ap;
85714+
85715+ BEGIN_LOCKS(audit);
85716+ logtype = gr_log_start(audit);
85717+ if (logtype == FLOODING) {
85718+ END_LOCKS(audit);
85719+ return;
85720+ }
85721+ va_start(ap, argtypes);
85722+ switch (argtypes) {
85723+ case GR_TTYSNIFF:
85724+ task = va_arg(ap, struct task_struct *);
85725+ gr_log_middle_varargs(audit, msg, &task->signal->curr_ip, gr_task_fullpath0(task), task->comm, task_pid_nr(task), gr_parent_task_fullpath0(task), task->real_parent->comm, task_pid_nr(task->real_parent));
85726+ break;
85727+ case GR_SYSCTL_HIDDEN:
85728+ str1 = va_arg(ap, char *);
85729+ gr_log_middle_varargs(audit, msg, result, str1);
85730+ break;
85731+ case GR_RBAC:
85732+ dentry = va_arg(ap, struct dentry *);
85733+ mnt = va_arg(ap, struct vfsmount *);
85734+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt));
85735+ break;
85736+ case GR_RBAC_STR:
85737+ dentry = va_arg(ap, struct dentry *);
85738+ mnt = va_arg(ap, struct vfsmount *);
85739+ str1 = va_arg(ap, char *);
85740+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1);
85741+ break;
85742+ case GR_STR_RBAC:
85743+ str1 = va_arg(ap, char *);
85744+ dentry = va_arg(ap, struct dentry *);
85745+ mnt = va_arg(ap, struct vfsmount *);
85746+ gr_log_middle_varargs(audit, msg, result, str1, gr_to_filename(dentry, mnt));
85747+ break;
85748+ case GR_RBAC_MODE2:
85749+ dentry = va_arg(ap, struct dentry *);
85750+ mnt = va_arg(ap, struct vfsmount *);
85751+ str1 = va_arg(ap, char *);
85752+ str2 = va_arg(ap, char *);
85753+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2);
85754+ break;
85755+ case GR_RBAC_MODE3:
85756+ dentry = va_arg(ap, struct dentry *);
85757+ mnt = va_arg(ap, struct vfsmount *);
85758+ str1 = va_arg(ap, char *);
85759+ str2 = va_arg(ap, char *);
85760+ str3 = va_arg(ap, char *);
85761+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2, str3);
85762+ break;
85763+ case GR_FILENAME:
85764+ dentry = va_arg(ap, struct dentry *);
85765+ mnt = va_arg(ap, struct vfsmount *);
85766+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt));
85767+ break;
85768+ case GR_STR_FILENAME:
85769+ str1 = va_arg(ap, char *);
85770+ dentry = va_arg(ap, struct dentry *);
85771+ mnt = va_arg(ap, struct vfsmount *);
85772+ gr_log_middle_varargs(audit, msg, str1, gr_to_filename(dentry, mnt));
85773+ break;
85774+ case GR_FILENAME_STR:
85775+ dentry = va_arg(ap, struct dentry *);
85776+ mnt = va_arg(ap, struct vfsmount *);
85777+ str1 = va_arg(ap, char *);
85778+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), str1);
85779+ break;
85780+ case GR_FILENAME_TWO_INT:
85781+ dentry = va_arg(ap, struct dentry *);
85782+ mnt = va_arg(ap, struct vfsmount *);
85783+ num1 = va_arg(ap, int);
85784+ num2 = va_arg(ap, int);
85785+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2);
85786+ break;
85787+ case GR_FILENAME_TWO_INT_STR:
85788+ dentry = va_arg(ap, struct dentry *);
85789+ mnt = va_arg(ap, struct vfsmount *);
85790+ num1 = va_arg(ap, int);
85791+ num2 = va_arg(ap, int);
85792+ str1 = va_arg(ap, char *);
85793+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2, str1);
85794+ break;
85795+ case GR_TEXTREL:
85796+ file = va_arg(ap, struct file *);
85797+ ulong1 = va_arg(ap, unsigned long);
85798+ ulong2 = va_arg(ap, unsigned long);
85799+ gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>", ulong1, ulong2);
85800+ break;
85801+ case GR_PTRACE:
85802+ task = va_arg(ap, struct task_struct *);
85803+ gr_log_middle_varargs(audit, msg, task->exec_file ? gr_to_filename(task->exec_file->f_path.dentry, task->exec_file->f_path.mnt) : "(none)", task->comm, task_pid_nr(task));
85804+ break;
85805+ case GR_RESOURCE:
85806+ task = va_arg(ap, struct task_struct *);
85807+ cred = __task_cred(task);
85808+ pcred = __task_cred(task->real_parent);
85809+ ulong1 = va_arg(ap, unsigned long);
85810+ str1 = va_arg(ap, char *);
85811+ ulong2 = va_arg(ap, unsigned long);
85812+ gr_log_middle_varargs(audit, msg, ulong1, str1, ulong2, gr_task_fullpath(task), task->comm, task_pid_nr(task), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), gr_parent_task_fullpath(task), task->real_parent->comm, task_pid_nr(task->real_parent), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid));
85813+ break;
85814+ case GR_CAP:
85815+ task = va_arg(ap, struct task_struct *);
85816+ cred = __task_cred(task);
85817+ pcred = __task_cred(task->real_parent);
85818+ str1 = va_arg(ap, char *);
85819+ gr_log_middle_varargs(audit, msg, str1, gr_task_fullpath(task), task->comm, task_pid_nr(task), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), gr_parent_task_fullpath(task), task->real_parent->comm, task_pid_nr(task->real_parent), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid));
85820+ break;
85821+ case GR_SIG:
85822+ str1 = va_arg(ap, char *);
85823+ voidptr = va_arg(ap, void *);
85824+ gr_log_middle_varargs(audit, msg, str1, voidptr);
85825+ break;
85826+ case GR_SIG2:
85827+ task = va_arg(ap, struct task_struct *);
85828+ cred = __task_cred(task);
85829+ pcred = __task_cred(task->real_parent);
85830+ num1 = va_arg(ap, int);
85831+ gr_log_middle_varargs(audit, msg, num1, gr_task_fullpath0(task), task->comm, task_pid_nr(task), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), gr_parent_task_fullpath0(task), task->real_parent->comm, task_pid_nr(task->real_parent), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid));
85832+ break;
85833+ case GR_CRASH1:
85834+ task = va_arg(ap, struct task_struct *);
85835+ cred = __task_cred(task);
85836+ pcred = __task_cred(task->real_parent);
85837+ ulong1 = va_arg(ap, unsigned long);
85838+ gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task_pid_nr(task), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), gr_parent_task_fullpath(task), task->real_parent->comm, task_pid_nr(task->real_parent), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid), GR_GLOBAL_UID(cred->uid), ulong1);
85839+ break;
85840+ case GR_CRASH2:
85841+ task = va_arg(ap, struct task_struct *);
85842+ cred = __task_cred(task);
85843+ pcred = __task_cred(task->real_parent);
85844+ ulong1 = va_arg(ap, unsigned long);
85845+ gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task_pid_nr(task), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), gr_parent_task_fullpath(task), task->real_parent->comm, task_pid_nr(task->real_parent), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid), ulong1);
85846+ break;
85847+ case GR_RWXMAP:
85848+ file = va_arg(ap, struct file *);
85849+ gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>");
85850+ break;
85851+ case GR_RWXMAPVMA:
85852+ vma = va_arg(ap, struct vm_area_struct *);
85853+ if (vma->vm_file)
85854+ str1 = gr_to_filename(vma->vm_file->f_path.dentry, vma->vm_file->f_path.mnt);
85855+ else if (vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP))
85856+ str1 = "<stack>";
85857+ else if (vma->vm_start <= current->mm->brk &&
85858+ vma->vm_end >= current->mm->start_brk)
85859+ str1 = "<heap>";
85860+ else
85861+ str1 = "<anonymous mapping>";
85862+ gr_log_middle_varargs(audit, msg, str1);
85863+ break;
85864+ case GR_PSACCT:
85865+ {
85866+ unsigned int wday, cday;
85867+ __u8 whr, chr;
85868+ __u8 wmin, cmin;
85869+ __u8 wsec, csec;
85870+ char cur_tty[64] = { 0 };
85871+ char parent_tty[64] = { 0 };
85872+
85873+ task = va_arg(ap, struct task_struct *);
85874+ wday = va_arg(ap, unsigned int);
85875+ cday = va_arg(ap, unsigned int);
85876+ whr = va_arg(ap, int);
85877+ chr = va_arg(ap, int);
85878+ wmin = va_arg(ap, int);
85879+ cmin = va_arg(ap, int);
85880+ wsec = va_arg(ap, int);
85881+ csec = va_arg(ap, int);
85882+ ulong1 = va_arg(ap, unsigned long);
85883+ cred = __task_cred(task);
85884+ pcred = __task_cred(task->real_parent);
85885+
85886+ gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task_pid_nr(task), &task->signal->curr_ip, tty_name(task->signal->tty, cur_tty), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), wday, whr, wmin, wsec, cday, chr, cmin, csec, (task->flags & PF_SIGNALED) ? "killed by signal" : "exited", ulong1, gr_parent_task_fullpath(task), task->real_parent->comm, task_pid_nr(task->real_parent), &task->real_parent->signal->curr_ip, tty_name(task->real_parent->signal->tty, parent_tty), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid));
85887+ }
85888+ break;
85889+ default:
85890+ gr_log_middle(audit, msg, ap);
85891+ }
85892+ va_end(ap);
85893+ // these don't need DEFAULTSECARGS printed on the end
85894+ if (argtypes == GR_CRASH1 || argtypes == GR_CRASH2)
85895+ gr_log_end(audit, 0);
85896+ else
85897+ gr_log_end(audit, 1);
85898+ END_LOCKS(audit);
85899+}
85900diff --git a/grsecurity/grsec_mem.c b/grsecurity/grsec_mem.c
85901new file mode 100644
85902index 0000000..0e39d8c
85903--- /dev/null
85904+++ b/grsecurity/grsec_mem.c
85905@@ -0,0 +1,48 @@
85906+#include <linux/kernel.h>
85907+#include <linux/sched.h>
85908+#include <linux/mm.h>
85909+#include <linux/mman.h>
85910+#include <linux/module.h>
85911+#include <linux/grinternal.h>
85912+
85913+void gr_handle_msr_write(void)
85914+{
85915+ gr_log_noargs(GR_DONT_AUDIT, GR_MSRWRITE_MSG);
85916+ return;
85917+}
85918+EXPORT_SYMBOL_GPL(gr_handle_msr_write);
85919+
85920+void
85921+gr_handle_ioperm(void)
85922+{
85923+ gr_log_noargs(GR_DONT_AUDIT, GR_IOPERM_MSG);
85924+ return;
85925+}
85926+
85927+void
85928+gr_handle_iopl(void)
85929+{
85930+ gr_log_noargs(GR_DONT_AUDIT, GR_IOPL_MSG);
85931+ return;
85932+}
85933+
85934+void
85935+gr_handle_mem_readwrite(u64 from, u64 to)
85936+{
85937+ gr_log_two_u64(GR_DONT_AUDIT, GR_MEM_READWRITE_MSG, from, to);
85938+ return;
85939+}
85940+
85941+void
85942+gr_handle_vm86(void)
85943+{
85944+ gr_log_noargs(GR_DONT_AUDIT, GR_VM86_MSG);
85945+ return;
85946+}
85947+
85948+void
85949+gr_log_badprocpid(const char *entry)
85950+{
85951+ gr_log_str(GR_DONT_AUDIT, GR_BADPROCPID_MSG, entry);
85952+ return;
85953+}
85954diff --git a/grsecurity/grsec_mount.c b/grsecurity/grsec_mount.c
85955new file mode 100644
85956index 0000000..6f9eb73
85957--- /dev/null
85958+++ b/grsecurity/grsec_mount.c
85959@@ -0,0 +1,65 @@
85960+#include <linux/kernel.h>
85961+#include <linux/sched.h>
85962+#include <linux/mount.h>
85963+#include <linux/major.h>
85964+#include <linux/grsecurity.h>
85965+#include <linux/grinternal.h>
85966+
85967+void
85968+gr_log_remount(const char *devname, const int retval)
85969+{
85970+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
85971+ if (grsec_enable_mount && (retval >= 0))
85972+ gr_log_str(GR_DO_AUDIT, GR_REMOUNT_AUDIT_MSG, devname ? devname : "none");
85973+#endif
85974+ return;
85975+}
85976+
85977+void
85978+gr_log_unmount(const char *devname, const int retval)
85979+{
85980+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
85981+ if (grsec_enable_mount && (retval >= 0))
85982+ gr_log_str(GR_DO_AUDIT, GR_UNMOUNT_AUDIT_MSG, devname ? devname : "none");
85983+#endif
85984+ return;
85985+}
85986+
85987+void
85988+gr_log_mount(const char *from, struct path *to, const int retval)
85989+{
85990+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
85991+ if (grsec_enable_mount && (retval >= 0))
85992+ gr_log_str_fs(GR_DO_AUDIT, GR_MOUNT_AUDIT_MSG, from ? from : "none", to->dentry, to->mnt);
85993+#endif
85994+ return;
85995+}
85996+
85997+int
85998+gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags)
85999+{
86000+#ifdef CONFIG_GRKERNSEC_ROFS
86001+ if (grsec_enable_rofs && !(mnt_flags & MNT_READONLY)) {
86002+ gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_MOUNT_MSG, dentry, mnt);
86003+ return -EPERM;
86004+ } else
86005+ return 0;
86006+#endif
86007+ return 0;
86008+}
86009+
86010+int
86011+gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode)
86012+{
86013+#ifdef CONFIG_GRKERNSEC_ROFS
86014+ struct inode *inode = dentry->d_inode;
86015+
86016+ if (grsec_enable_rofs && (acc_mode & MAY_WRITE) &&
86017+ inode && (S_ISBLK(inode->i_mode) || (S_ISCHR(inode->i_mode) && imajor(inode) == RAW_MAJOR))) {
86018+ gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_BLOCKWRITE_MSG, dentry, mnt);
86019+ return -EPERM;
86020+ } else
86021+ return 0;
86022+#endif
86023+ return 0;
86024+}
86025diff --git a/grsecurity/grsec_pax.c b/grsecurity/grsec_pax.c
86026new file mode 100644
86027index 0000000..6ee9d50
86028--- /dev/null
86029+++ b/grsecurity/grsec_pax.c
86030@@ -0,0 +1,45 @@
86031+#include <linux/kernel.h>
86032+#include <linux/sched.h>
86033+#include <linux/mm.h>
86034+#include <linux/file.h>
86035+#include <linux/grinternal.h>
86036+#include <linux/grsecurity.h>
86037+
86038+void
86039+gr_log_textrel(struct vm_area_struct * vma)
86040+{
86041+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
86042+ if (grsec_enable_log_rwxmaps)
86043+ gr_log_textrel_ulong_ulong(GR_DONT_AUDIT, GR_TEXTREL_AUDIT_MSG, vma->vm_file, vma->vm_start, vma->vm_pgoff);
86044+#endif
86045+ return;
86046+}
86047+
86048+void gr_log_ptgnustack(struct file *file)
86049+{
86050+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
86051+ if (grsec_enable_log_rwxmaps)
86052+ gr_log_rwxmap(GR_DONT_AUDIT, GR_PTGNUSTACK_MSG, file);
86053+#endif
86054+ return;
86055+}
86056+
86057+void
86058+gr_log_rwxmmap(struct file *file)
86059+{
86060+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
86061+ if (grsec_enable_log_rwxmaps)
86062+ gr_log_rwxmap(GR_DONT_AUDIT, GR_RWXMMAP_MSG, file);
86063+#endif
86064+ return;
86065+}
86066+
86067+void
86068+gr_log_rwxmprotect(struct vm_area_struct *vma)
86069+{
86070+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
86071+ if (grsec_enable_log_rwxmaps)
86072+ gr_log_rwxmap_vma(GR_DONT_AUDIT, GR_RWXMPROTECT_MSG, vma);
86073+#endif
86074+ return;
86075+}
86076diff --git a/grsecurity/grsec_proc.c b/grsecurity/grsec_proc.c
86077new file mode 100644
86078index 0000000..2005a3a
86079--- /dev/null
86080+++ b/grsecurity/grsec_proc.c
86081@@ -0,0 +1,20 @@
86082+#include <linux/kernel.h>
86083+#include <linux/sched.h>
86084+#include <linux/grsecurity.h>
86085+#include <linux/grinternal.h>
86086+
86087+int gr_proc_is_restricted(void)
86088+{
86089+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
86090+ const struct cred *cred = current_cred();
86091+#endif
86092+
86093+#ifdef CONFIG_GRKERNSEC_PROC_USER
86094+ if (!uid_eq(cred->fsuid, GLOBAL_ROOT_UID))
86095+ return -EACCES;
86096+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
86097+ if (!uid_eq(cred->fsuid, GLOBAL_ROOT_UID) && !in_group_p(grsec_proc_gid))
86098+ return -EACCES;
86099+#endif
86100+ return 0;
86101+}
86102diff --git a/grsecurity/grsec_ptrace.c b/grsecurity/grsec_ptrace.c
86103new file mode 100644
86104index 0000000..f7f29aa
86105--- /dev/null
86106+++ b/grsecurity/grsec_ptrace.c
86107@@ -0,0 +1,30 @@
86108+#include <linux/kernel.h>
86109+#include <linux/sched.h>
86110+#include <linux/grinternal.h>
86111+#include <linux/security.h>
86112+
86113+void
86114+gr_audit_ptrace(struct task_struct *task)
86115+{
86116+#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
86117+ if (grsec_enable_audit_ptrace)
86118+ gr_log_ptrace(GR_DO_AUDIT, GR_PTRACE_AUDIT_MSG, task);
86119+#endif
86120+ return;
86121+}
86122+
86123+int
86124+gr_ptrace_readexec(struct file *file, int unsafe_flags)
86125+{
86126+#ifdef CONFIG_GRKERNSEC_PTRACE_READEXEC
86127+ const struct dentry *dentry = file->f_path.dentry;
86128+ const struct vfsmount *mnt = file->f_path.mnt;
86129+
86130+ if (grsec_enable_ptrace_readexec && (unsafe_flags & LSM_UNSAFE_PTRACE) &&
86131+ (inode_permission(dentry->d_inode, MAY_READ) || !gr_acl_handle_open(dentry, mnt, MAY_READ))) {
86132+ gr_log_fs_generic(GR_DONT_AUDIT, GR_PTRACE_READEXEC_MSG, dentry, mnt);
86133+ return -EACCES;
86134+ }
86135+#endif
86136+ return 0;
86137+}
86138diff --git a/grsecurity/grsec_sig.c b/grsecurity/grsec_sig.c
86139new file mode 100644
86140index 0000000..3860c7e
86141--- /dev/null
86142+++ b/grsecurity/grsec_sig.c
86143@@ -0,0 +1,236 @@
86144+#include <linux/kernel.h>
86145+#include <linux/sched.h>
86146+#include <linux/fs.h>
86147+#include <linux/delay.h>
86148+#include <linux/grsecurity.h>
86149+#include <linux/grinternal.h>
86150+#include <linux/hardirq.h>
86151+
86152+char *signames[] = {
86153+ [SIGSEGV] = "Segmentation fault",
86154+ [SIGILL] = "Illegal instruction",
86155+ [SIGABRT] = "Abort",
86156+ [SIGBUS] = "Invalid alignment/Bus error"
86157+};
86158+
86159+void
86160+gr_log_signal(const int sig, const void *addr, const struct task_struct *t)
86161+{
86162+#ifdef CONFIG_GRKERNSEC_SIGNAL
86163+ if (grsec_enable_signal && ((sig == SIGSEGV) || (sig == SIGILL) ||
86164+ (sig == SIGABRT) || (sig == SIGBUS))) {
86165+ if (task_pid_nr(t) == task_pid_nr(current)) {
86166+ gr_log_sig_addr(GR_DONT_AUDIT_GOOD, GR_UNISIGLOG_MSG, signames[sig], addr);
86167+ } else {
86168+ gr_log_sig_task(GR_DONT_AUDIT_GOOD, GR_DUALSIGLOG_MSG, t, sig);
86169+ }
86170+ }
86171+#endif
86172+ return;
86173+}
86174+
86175+int
86176+gr_handle_signal(const struct task_struct *p, const int sig)
86177+{
86178+#ifdef CONFIG_GRKERNSEC
86179+ /* ignore the 0 signal for protected task checks */
86180+ if (task_pid_nr(current) > 1 && sig && gr_check_protected_task(p)) {
86181+ gr_log_sig_task(GR_DONT_AUDIT, GR_SIG_ACL_MSG, p, sig);
86182+ return -EPERM;
86183+ } else if (gr_pid_is_chrooted((struct task_struct *)p)) {
86184+ return -EPERM;
86185+ }
86186+#endif
86187+ return 0;
86188+}
86189+
86190+#ifdef CONFIG_GRKERNSEC
86191+extern int specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t);
86192+
86193+int gr_fake_force_sig(int sig, struct task_struct *t)
86194+{
86195+ unsigned long int flags;
86196+ int ret, blocked, ignored;
86197+ struct k_sigaction *action;
86198+
86199+ spin_lock_irqsave(&t->sighand->siglock, flags);
86200+ action = &t->sighand->action[sig-1];
86201+ ignored = action->sa.sa_handler == SIG_IGN;
86202+ blocked = sigismember(&t->blocked, sig);
86203+ if (blocked || ignored) {
86204+ action->sa.sa_handler = SIG_DFL;
86205+ if (blocked) {
86206+ sigdelset(&t->blocked, sig);
86207+ recalc_sigpending_and_wake(t);
86208+ }
86209+ }
86210+ if (action->sa.sa_handler == SIG_DFL)
86211+ t->signal->flags &= ~SIGNAL_UNKILLABLE;
86212+ ret = specific_send_sig_info(sig, SEND_SIG_PRIV, t);
86213+
86214+ spin_unlock_irqrestore(&t->sighand->siglock, flags);
86215+
86216+ return ret;
86217+}
86218+#endif
86219+
86220+#define GR_USER_BAN_TIME (15 * 60)
86221+#define GR_DAEMON_BRUTE_TIME (30 * 60)
86222+
86223+void gr_handle_brute_attach(int dumpable)
86224+{
86225+#ifdef CONFIG_GRKERNSEC_BRUTE
86226+ struct task_struct *p = current;
86227+ kuid_t uid = GLOBAL_ROOT_UID;
86228+ int daemon = 0;
86229+
86230+ if (!grsec_enable_brute)
86231+ return;
86232+
86233+ rcu_read_lock();
86234+ read_lock(&tasklist_lock);
86235+ read_lock(&grsec_exec_file_lock);
86236+ if (p->real_parent && gr_is_same_file(p->real_parent->exec_file, p->exec_file)) {
86237+ p->real_parent->brute_expires = get_seconds() + GR_DAEMON_BRUTE_TIME;
86238+ p->real_parent->brute = 1;
86239+ daemon = 1;
86240+ } else {
86241+ const struct cred *cred = __task_cred(p), *cred2;
86242+ struct task_struct *tsk, *tsk2;
86243+
86244+ if (dumpable != SUID_DUMP_USER && gr_is_global_nonroot(cred->uid)) {
86245+ struct user_struct *user;
86246+
86247+ uid = cred->uid;
86248+
86249+ /* this is put upon execution past expiration */
86250+ user = find_user(uid);
86251+ if (user == NULL)
86252+ goto unlock;
86253+ user->suid_banned = 1;
86254+ user->suid_ban_expires = get_seconds() + GR_USER_BAN_TIME;
86255+ if (user->suid_ban_expires == ~0UL)
86256+ user->suid_ban_expires--;
86257+
86258+ /* only kill other threads of the same binary, from the same user */
86259+ do_each_thread(tsk2, tsk) {
86260+ cred2 = __task_cred(tsk);
86261+ if (tsk != p && uid_eq(cred2->uid, uid) && gr_is_same_file(tsk->exec_file, p->exec_file))
86262+ gr_fake_force_sig(SIGKILL, tsk);
86263+ } while_each_thread(tsk2, tsk);
86264+ }
86265+ }
86266+unlock:
86267+ read_unlock(&grsec_exec_file_lock);
86268+ read_unlock(&tasklist_lock);
86269+ rcu_read_unlock();
86270+
86271+ if (gr_is_global_nonroot(uid))
86272+ gr_log_fs_int2(GR_DONT_AUDIT, GR_BRUTE_SUID_MSG, p->exec_file->f_path.dentry, p->exec_file->f_path.mnt, GR_GLOBAL_UID(uid), GR_USER_BAN_TIME / 60);
86273+ else if (daemon)
86274+ gr_log_noargs(GR_DONT_AUDIT, GR_BRUTE_DAEMON_MSG);
86275+
86276+#endif
86277+ return;
86278+}
86279+
86280+void gr_handle_brute_check(void)
86281+{
86282+#ifdef CONFIG_GRKERNSEC_BRUTE
86283+ struct task_struct *p = current;
86284+
86285+ if (unlikely(p->brute)) {
86286+ if (!grsec_enable_brute)
86287+ p->brute = 0;
86288+ else if (time_before(get_seconds(), p->brute_expires))
86289+ msleep(30 * 1000);
86290+ }
86291+#endif
86292+ return;
86293+}
86294+
86295+void gr_handle_kernel_exploit(void)
86296+{
86297+#ifdef CONFIG_GRKERNSEC_KERN_LOCKOUT
86298+ const struct cred *cred;
86299+ struct task_struct *tsk, *tsk2;
86300+ struct user_struct *user;
86301+ kuid_t uid;
86302+
86303+ if (in_irq() || in_serving_softirq() || in_nmi())
86304+ panic("grsec: halting the system due to suspicious kernel crash caused in interrupt context");
86305+
86306+ uid = current_uid();
86307+
86308+ if (gr_is_global_root(uid))
86309+ panic("grsec: halting the system due to suspicious kernel crash caused by root");
86310+ else {
86311+ /* kill all the processes of this user, hold a reference
86312+ to their creds struct, and prevent them from creating
86313+ another process until system reset
86314+ */
86315+ printk(KERN_ALERT "grsec: banning user with uid %u until system restart for suspicious kernel crash\n",
86316+ GR_GLOBAL_UID(uid));
86317+ /* we intentionally leak this ref */
86318+ user = get_uid(current->cred->user);
86319+ if (user)
86320+ user->kernel_banned = 1;
86321+
86322+ /* kill all processes of this user */
86323+ read_lock(&tasklist_lock);
86324+ do_each_thread(tsk2, tsk) {
86325+ cred = __task_cred(tsk);
86326+ if (uid_eq(cred->uid, uid))
86327+ gr_fake_force_sig(SIGKILL, tsk);
86328+ } while_each_thread(tsk2, tsk);
86329+ read_unlock(&tasklist_lock);
86330+ }
86331+#endif
86332+}
86333+
86334+#ifdef CONFIG_GRKERNSEC_BRUTE
86335+static bool suid_ban_expired(struct user_struct *user)
86336+{
86337+ if (user->suid_ban_expires != ~0UL && time_after_eq(get_seconds(), user->suid_ban_expires)) {
86338+ user->suid_banned = 0;
86339+ user->suid_ban_expires = 0;
86340+ free_uid(user);
86341+ return true;
86342+ }
86343+
86344+ return false;
86345+}
86346+#endif
86347+
86348+int gr_process_kernel_exec_ban(void)
86349+{
86350+#ifdef CONFIG_GRKERNSEC_KERN_LOCKOUT
86351+ if (unlikely(current->cred->user->kernel_banned))
86352+ return -EPERM;
86353+#endif
86354+ return 0;
86355+}
86356+
86357+int gr_process_kernel_setuid_ban(struct user_struct *user)
86358+{
86359+#ifdef CONFIG_GRKERNSEC_KERN_LOCKOUT
86360+ if (unlikely(user->kernel_banned))
86361+ gr_fake_force_sig(SIGKILL, current);
86362+#endif
86363+ return 0;
86364+}
86365+
86366+int gr_process_suid_exec_ban(const struct linux_binprm *bprm)
86367+{
86368+#ifdef CONFIG_GRKERNSEC_BRUTE
86369+ struct user_struct *user = current->cred->user;
86370+ if (unlikely(user->suid_banned)) {
86371+ if (suid_ban_expired(user))
86372+ return 0;
86373+ /* disallow execution of suid binaries only */
86374+ else if (!uid_eq(bprm->cred->euid, current->cred->uid))
86375+ return -EPERM;
86376+ }
86377+#endif
86378+ return 0;
86379+}
86380diff --git a/grsecurity/grsec_sock.c b/grsecurity/grsec_sock.c
86381new file mode 100644
86382index 0000000..a523bd2
86383--- /dev/null
86384+++ b/grsecurity/grsec_sock.c
86385@@ -0,0 +1,244 @@
86386+#include <linux/kernel.h>
86387+#include <linux/module.h>
86388+#include <linux/sched.h>
86389+#include <linux/file.h>
86390+#include <linux/net.h>
86391+#include <linux/in.h>
86392+#include <linux/ip.h>
86393+#include <net/sock.h>
86394+#include <net/inet_sock.h>
86395+#include <linux/grsecurity.h>
86396+#include <linux/grinternal.h>
86397+#include <linux/gracl.h>
86398+
86399+extern int gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb);
86400+extern int gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr);
86401+
86402+EXPORT_SYMBOL_GPL(gr_search_udp_recvmsg);
86403+EXPORT_SYMBOL_GPL(gr_search_udp_sendmsg);
86404+
86405+#ifdef CONFIG_UNIX_MODULE
86406+EXPORT_SYMBOL_GPL(gr_acl_handle_unix);
86407+EXPORT_SYMBOL_GPL(gr_acl_handle_mknod);
86408+EXPORT_SYMBOL_GPL(gr_handle_chroot_unix);
86409+EXPORT_SYMBOL_GPL(gr_handle_create);
86410+#endif
86411+
86412+#ifdef CONFIG_GRKERNSEC
86413+#define gr_conn_table_size 32749
86414+struct conn_table_entry {
86415+ struct conn_table_entry *next;
86416+ struct signal_struct *sig;
86417+};
86418+
86419+struct conn_table_entry *gr_conn_table[gr_conn_table_size];
86420+DEFINE_SPINLOCK(gr_conn_table_lock);
86421+
86422+extern const char * gr_socktype_to_name(unsigned char type);
86423+extern const char * gr_proto_to_name(unsigned char proto);
86424+extern const char * gr_sockfamily_to_name(unsigned char family);
86425+
86426+static int
86427+conn_hash(__u32 saddr, __u32 daddr, __u16 sport, __u16 dport, unsigned int size)
86428+{
86429+ return ((daddr + saddr + (sport << 8) + (dport << 16)) % size);
86430+}
86431+
86432+static int
86433+conn_match(const struct signal_struct *sig, __u32 saddr, __u32 daddr,
86434+ __u16 sport, __u16 dport)
86435+{
86436+ if (unlikely(sig->gr_saddr == saddr && sig->gr_daddr == daddr &&
86437+ sig->gr_sport == sport && sig->gr_dport == dport))
86438+ return 1;
86439+ else
86440+ return 0;
86441+}
86442+
86443+static void gr_add_to_task_ip_table_nolock(struct signal_struct *sig, struct conn_table_entry *newent)
86444+{
86445+ struct conn_table_entry **match;
86446+ unsigned int index;
86447+
86448+ index = conn_hash(sig->gr_saddr, sig->gr_daddr,
86449+ sig->gr_sport, sig->gr_dport,
86450+ gr_conn_table_size);
86451+
86452+ newent->sig = sig;
86453+
86454+ match = &gr_conn_table[index];
86455+ newent->next = *match;
86456+ *match = newent;
86457+
86458+ return;
86459+}
86460+
86461+static void gr_del_task_from_ip_table_nolock(struct signal_struct *sig)
86462+{
86463+ struct conn_table_entry *match, *last = NULL;
86464+ unsigned int index;
86465+
86466+ index = conn_hash(sig->gr_saddr, sig->gr_daddr,
86467+ sig->gr_sport, sig->gr_dport,
86468+ gr_conn_table_size);
86469+
86470+ match = gr_conn_table[index];
86471+ while (match && !conn_match(match->sig,
86472+ sig->gr_saddr, sig->gr_daddr, sig->gr_sport,
86473+ sig->gr_dport)) {
86474+ last = match;
86475+ match = match->next;
86476+ }
86477+
86478+ if (match) {
86479+ if (last)
86480+ last->next = match->next;
86481+ else
86482+ gr_conn_table[index] = NULL;
86483+ kfree(match);
86484+ }
86485+
86486+ return;
86487+}
86488+
86489+static struct signal_struct * gr_lookup_task_ip_table(__u32 saddr, __u32 daddr,
86490+ __u16 sport, __u16 dport)
86491+{
86492+ struct conn_table_entry *match;
86493+ unsigned int index;
86494+
86495+ index = conn_hash(saddr, daddr, sport, dport, gr_conn_table_size);
86496+
86497+ match = gr_conn_table[index];
86498+ while (match && !conn_match(match->sig, saddr, daddr, sport, dport))
86499+ match = match->next;
86500+
86501+ if (match)
86502+ return match->sig;
86503+ else
86504+ return NULL;
86505+}
86506+
86507+#endif
86508+
86509+void gr_update_task_in_ip_table(const struct inet_sock *inet)
86510+{
86511+#ifdef CONFIG_GRKERNSEC
86512+ struct signal_struct *sig = current->signal;
86513+ struct conn_table_entry *newent;
86514+
86515+ newent = kmalloc(sizeof(struct conn_table_entry), GFP_ATOMIC);
86516+ if (newent == NULL)
86517+ return;
86518+ /* no bh lock needed since we are called with bh disabled */
86519+ spin_lock(&gr_conn_table_lock);
86520+ gr_del_task_from_ip_table_nolock(sig);
86521+ sig->gr_saddr = inet->inet_rcv_saddr;
86522+ sig->gr_daddr = inet->inet_daddr;
86523+ sig->gr_sport = inet->inet_sport;
86524+ sig->gr_dport = inet->inet_dport;
86525+ gr_add_to_task_ip_table_nolock(sig, newent);
86526+ spin_unlock(&gr_conn_table_lock);
86527+#endif
86528+ return;
86529+}
86530+
86531+void gr_del_task_from_ip_table(struct task_struct *task)
86532+{
86533+#ifdef CONFIG_GRKERNSEC
86534+ spin_lock_bh(&gr_conn_table_lock);
86535+ gr_del_task_from_ip_table_nolock(task->signal);
86536+ spin_unlock_bh(&gr_conn_table_lock);
86537+#endif
86538+ return;
86539+}
86540+
86541+void
86542+gr_attach_curr_ip(const struct sock *sk)
86543+{
86544+#ifdef CONFIG_GRKERNSEC
86545+ struct signal_struct *p, *set;
86546+ const struct inet_sock *inet = inet_sk(sk);
86547+
86548+ if (unlikely(sk->sk_protocol != IPPROTO_TCP))
86549+ return;
86550+
86551+ set = current->signal;
86552+
86553+ spin_lock_bh(&gr_conn_table_lock);
86554+ p = gr_lookup_task_ip_table(inet->inet_daddr, inet->inet_rcv_saddr,
86555+ inet->inet_dport, inet->inet_sport);
86556+ if (unlikely(p != NULL)) {
86557+ set->curr_ip = p->curr_ip;
86558+ set->used_accept = 1;
86559+ gr_del_task_from_ip_table_nolock(p);
86560+ spin_unlock_bh(&gr_conn_table_lock);
86561+ return;
86562+ }
86563+ spin_unlock_bh(&gr_conn_table_lock);
86564+
86565+ set->curr_ip = inet->inet_daddr;
86566+ set->used_accept = 1;
86567+#endif
86568+ return;
86569+}
86570+
86571+int
86572+gr_handle_sock_all(const int family, const int type, const int protocol)
86573+{
86574+#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
86575+ if (grsec_enable_socket_all && in_group_p(grsec_socket_all_gid) &&
86576+ (family != AF_UNIX)) {
86577+ if (family == AF_INET)
86578+ gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), gr_proto_to_name(protocol));
86579+ else
86580+ gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), protocol);
86581+ return -EACCES;
86582+ }
86583+#endif
86584+ return 0;
86585+}
86586+
86587+int
86588+gr_handle_sock_server(const struct sockaddr *sck)
86589+{
86590+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
86591+ if (grsec_enable_socket_server &&
86592+ in_group_p(grsec_socket_server_gid) &&
86593+ sck && (sck->sa_family != AF_UNIX) &&
86594+ (sck->sa_family != AF_LOCAL)) {
86595+ gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
86596+ return -EACCES;
86597+ }
86598+#endif
86599+ return 0;
86600+}
86601+
86602+int
86603+gr_handle_sock_server_other(const struct sock *sck)
86604+{
86605+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
86606+ if (grsec_enable_socket_server &&
86607+ in_group_p(grsec_socket_server_gid) &&
86608+ sck && (sck->sk_family != AF_UNIX) &&
86609+ (sck->sk_family != AF_LOCAL)) {
86610+ gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
86611+ return -EACCES;
86612+ }
86613+#endif
86614+ return 0;
86615+}
86616+
86617+int
86618+gr_handle_sock_client(const struct sockaddr *sck)
86619+{
86620+#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
86621+ if (grsec_enable_socket_client && in_group_p(grsec_socket_client_gid) &&
86622+ sck && (sck->sa_family != AF_UNIX) &&
86623+ (sck->sa_family != AF_LOCAL)) {
86624+ gr_log_noargs(GR_DONT_AUDIT, GR_CONNECT_MSG);
86625+ return -EACCES;
86626+ }
86627+#endif
86628+ return 0;
86629+}
86630diff --git a/grsecurity/grsec_sysctl.c b/grsecurity/grsec_sysctl.c
86631new file mode 100644
86632index 0000000..cce889e
86633--- /dev/null
86634+++ b/grsecurity/grsec_sysctl.c
86635@@ -0,0 +1,488 @@
86636+#include <linux/kernel.h>
86637+#include <linux/sched.h>
86638+#include <linux/sysctl.h>
86639+#include <linux/grsecurity.h>
86640+#include <linux/grinternal.h>
86641+
86642+int
86643+gr_handle_sysctl_mod(const char *dirname, const char *name, const int op)
86644+{
86645+#ifdef CONFIG_GRKERNSEC_SYSCTL
86646+ if (dirname == NULL || name == NULL)
86647+ return 0;
86648+ if (!strcmp(dirname, "grsecurity") && grsec_lock && (op & MAY_WRITE)) {
86649+ gr_log_str(GR_DONT_AUDIT, GR_SYSCTL_MSG, name);
86650+ return -EACCES;
86651+ }
86652+#endif
86653+ return 0;
86654+}
86655+
86656+#if defined(CONFIG_GRKERNSEC_ROFS) || defined(CONFIG_GRKERNSEC_DENYUSB)
86657+static int __maybe_unused __read_only one = 1;
86658+#endif
86659+
86660+#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS) || \
86661+ defined(CONFIG_GRKERNSEC_DENYUSB)
86662+struct ctl_table grsecurity_table[] = {
86663+#ifdef CONFIG_GRKERNSEC_SYSCTL
86664+#ifdef CONFIG_GRKERNSEC_SYSCTL_DISTRO
86665+#ifdef CONFIG_GRKERNSEC_IO
86666+ {
86667+ .procname = "disable_priv_io",
86668+ .data = &grsec_disable_privio,
86669+ .maxlen = sizeof(int),
86670+ .mode = 0600,
86671+ .proc_handler = &proc_dointvec,
86672+ },
86673+#endif
86674+#endif
86675+#ifdef CONFIG_GRKERNSEC_LINK
86676+ {
86677+ .procname = "linking_restrictions",
86678+ .data = &grsec_enable_link,
86679+ .maxlen = sizeof(int),
86680+ .mode = 0600,
86681+ .proc_handler = &proc_dointvec,
86682+ },
86683+#endif
86684+#ifdef CONFIG_GRKERNSEC_SYMLINKOWN
86685+ {
86686+ .procname = "enforce_symlinksifowner",
86687+ .data = &grsec_enable_symlinkown,
86688+ .maxlen = sizeof(int),
86689+ .mode = 0600,
86690+ .proc_handler = &proc_dointvec,
86691+ },
86692+ {
86693+ .procname = "symlinkown_gid",
86694+ .data = &grsec_symlinkown_gid,
86695+ .maxlen = sizeof(int),
86696+ .mode = 0600,
86697+ .proc_handler = &proc_dointvec,
86698+ },
86699+#endif
86700+#ifdef CONFIG_GRKERNSEC_BRUTE
86701+ {
86702+ .procname = "deter_bruteforce",
86703+ .data = &grsec_enable_brute,
86704+ .maxlen = sizeof(int),
86705+ .mode = 0600,
86706+ .proc_handler = &proc_dointvec,
86707+ },
86708+#endif
86709+#ifdef CONFIG_GRKERNSEC_FIFO
86710+ {
86711+ .procname = "fifo_restrictions",
86712+ .data = &grsec_enable_fifo,
86713+ .maxlen = sizeof(int),
86714+ .mode = 0600,
86715+ .proc_handler = &proc_dointvec,
86716+ },
86717+#endif
86718+#ifdef CONFIG_GRKERNSEC_PTRACE_READEXEC
86719+ {
86720+ .procname = "ptrace_readexec",
86721+ .data = &grsec_enable_ptrace_readexec,
86722+ .maxlen = sizeof(int),
86723+ .mode = 0600,
86724+ .proc_handler = &proc_dointvec,
86725+ },
86726+#endif
86727+#ifdef CONFIG_GRKERNSEC_SETXID
86728+ {
86729+ .procname = "consistent_setxid",
86730+ .data = &grsec_enable_setxid,
86731+ .maxlen = sizeof(int),
86732+ .mode = 0600,
86733+ .proc_handler = &proc_dointvec,
86734+ },
86735+#endif
86736+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
86737+ {
86738+ .procname = "ip_blackhole",
86739+ .data = &grsec_enable_blackhole,
86740+ .maxlen = sizeof(int),
86741+ .mode = 0600,
86742+ .proc_handler = &proc_dointvec,
86743+ },
86744+ {
86745+ .procname = "lastack_retries",
86746+ .data = &grsec_lastack_retries,
86747+ .maxlen = sizeof(int),
86748+ .mode = 0600,
86749+ .proc_handler = &proc_dointvec,
86750+ },
86751+#endif
86752+#ifdef CONFIG_GRKERNSEC_EXECLOG
86753+ {
86754+ .procname = "exec_logging",
86755+ .data = &grsec_enable_execlog,
86756+ .maxlen = sizeof(int),
86757+ .mode = 0600,
86758+ .proc_handler = &proc_dointvec,
86759+ },
86760+#endif
86761+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
86762+ {
86763+ .procname = "rwxmap_logging",
86764+ .data = &grsec_enable_log_rwxmaps,
86765+ .maxlen = sizeof(int),
86766+ .mode = 0600,
86767+ .proc_handler = &proc_dointvec,
86768+ },
86769+#endif
86770+#ifdef CONFIG_GRKERNSEC_SIGNAL
86771+ {
86772+ .procname = "signal_logging",
86773+ .data = &grsec_enable_signal,
86774+ .maxlen = sizeof(int),
86775+ .mode = 0600,
86776+ .proc_handler = &proc_dointvec,
86777+ },
86778+#endif
86779+#ifdef CONFIG_GRKERNSEC_FORKFAIL
86780+ {
86781+ .procname = "forkfail_logging",
86782+ .data = &grsec_enable_forkfail,
86783+ .maxlen = sizeof(int),
86784+ .mode = 0600,
86785+ .proc_handler = &proc_dointvec,
86786+ },
86787+#endif
86788+#ifdef CONFIG_GRKERNSEC_TIME
86789+ {
86790+ .procname = "timechange_logging",
86791+ .data = &grsec_enable_time,
86792+ .maxlen = sizeof(int),
86793+ .mode = 0600,
86794+ .proc_handler = &proc_dointvec,
86795+ },
86796+#endif
86797+#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
86798+ {
86799+ .procname = "chroot_deny_shmat",
86800+ .data = &grsec_enable_chroot_shmat,
86801+ .maxlen = sizeof(int),
86802+ .mode = 0600,
86803+ .proc_handler = &proc_dointvec,
86804+ },
86805+#endif
86806+#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
86807+ {
86808+ .procname = "chroot_deny_unix",
86809+ .data = &grsec_enable_chroot_unix,
86810+ .maxlen = sizeof(int),
86811+ .mode = 0600,
86812+ .proc_handler = &proc_dointvec,
86813+ },
86814+#endif
86815+#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
86816+ {
86817+ .procname = "chroot_deny_mount",
86818+ .data = &grsec_enable_chroot_mount,
86819+ .maxlen = sizeof(int),
86820+ .mode = 0600,
86821+ .proc_handler = &proc_dointvec,
86822+ },
86823+#endif
86824+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
86825+ {
86826+ .procname = "chroot_deny_fchdir",
86827+ .data = &grsec_enable_chroot_fchdir,
86828+ .maxlen = sizeof(int),
86829+ .mode = 0600,
86830+ .proc_handler = &proc_dointvec,
86831+ },
86832+#endif
86833+#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
86834+ {
86835+ .procname = "chroot_deny_chroot",
86836+ .data = &grsec_enable_chroot_double,
86837+ .maxlen = sizeof(int),
86838+ .mode = 0600,
86839+ .proc_handler = &proc_dointvec,
86840+ },
86841+#endif
86842+#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
86843+ {
86844+ .procname = "chroot_deny_pivot",
86845+ .data = &grsec_enable_chroot_pivot,
86846+ .maxlen = sizeof(int),
86847+ .mode = 0600,
86848+ .proc_handler = &proc_dointvec,
86849+ },
86850+#endif
86851+#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
86852+ {
86853+ .procname = "chroot_enforce_chdir",
86854+ .data = &grsec_enable_chroot_chdir,
86855+ .maxlen = sizeof(int),
86856+ .mode = 0600,
86857+ .proc_handler = &proc_dointvec,
86858+ },
86859+#endif
86860+#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
86861+ {
86862+ .procname = "chroot_deny_chmod",
86863+ .data = &grsec_enable_chroot_chmod,
86864+ .maxlen = sizeof(int),
86865+ .mode = 0600,
86866+ .proc_handler = &proc_dointvec,
86867+ },
86868+#endif
86869+#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
86870+ {
86871+ .procname = "chroot_deny_mknod",
86872+ .data = &grsec_enable_chroot_mknod,
86873+ .maxlen = sizeof(int),
86874+ .mode = 0600,
86875+ .proc_handler = &proc_dointvec,
86876+ },
86877+#endif
86878+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
86879+ {
86880+ .procname = "chroot_restrict_nice",
86881+ .data = &grsec_enable_chroot_nice,
86882+ .maxlen = sizeof(int),
86883+ .mode = 0600,
86884+ .proc_handler = &proc_dointvec,
86885+ },
86886+#endif
86887+#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
86888+ {
86889+ .procname = "chroot_execlog",
86890+ .data = &grsec_enable_chroot_execlog,
86891+ .maxlen = sizeof(int),
86892+ .mode = 0600,
86893+ .proc_handler = &proc_dointvec,
86894+ },
86895+#endif
86896+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
86897+ {
86898+ .procname = "chroot_caps",
86899+ .data = &grsec_enable_chroot_caps,
86900+ .maxlen = sizeof(int),
86901+ .mode = 0600,
86902+ .proc_handler = &proc_dointvec,
86903+ },
86904+#endif
86905+#ifdef CONFIG_GRKERNSEC_CHROOT_RENAME
86906+ {
86907+ .procname = "chroot_deny_bad_rename",
86908+ .data = &grsec_enable_chroot_rename,
86909+ .maxlen = sizeof(int),
86910+ .mode = 0600,
86911+ .proc_handler = &proc_dointvec,
86912+ },
86913+#endif
86914+#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
86915+ {
86916+ .procname = "chroot_deny_sysctl",
86917+ .data = &grsec_enable_chroot_sysctl,
86918+ .maxlen = sizeof(int),
86919+ .mode = 0600,
86920+ .proc_handler = &proc_dointvec,
86921+ },
86922+#endif
86923+#ifdef CONFIG_GRKERNSEC_TPE
86924+ {
86925+ .procname = "tpe",
86926+ .data = &grsec_enable_tpe,
86927+ .maxlen = sizeof(int),
86928+ .mode = 0600,
86929+ .proc_handler = &proc_dointvec,
86930+ },
86931+ {
86932+ .procname = "tpe_gid",
86933+ .data = &grsec_tpe_gid,
86934+ .maxlen = sizeof(int),
86935+ .mode = 0600,
86936+ .proc_handler = &proc_dointvec,
86937+ },
86938+#endif
86939+#ifdef CONFIG_GRKERNSEC_TPE_INVERT
86940+ {
86941+ .procname = "tpe_invert",
86942+ .data = &grsec_enable_tpe_invert,
86943+ .maxlen = sizeof(int),
86944+ .mode = 0600,
86945+ .proc_handler = &proc_dointvec,
86946+ },
86947+#endif
86948+#ifdef CONFIG_GRKERNSEC_TPE_ALL
86949+ {
86950+ .procname = "tpe_restrict_all",
86951+ .data = &grsec_enable_tpe_all,
86952+ .maxlen = sizeof(int),
86953+ .mode = 0600,
86954+ .proc_handler = &proc_dointvec,
86955+ },
86956+#endif
86957+#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
86958+ {
86959+ .procname = "socket_all",
86960+ .data = &grsec_enable_socket_all,
86961+ .maxlen = sizeof(int),
86962+ .mode = 0600,
86963+ .proc_handler = &proc_dointvec,
86964+ },
86965+ {
86966+ .procname = "socket_all_gid",
86967+ .data = &grsec_socket_all_gid,
86968+ .maxlen = sizeof(int),
86969+ .mode = 0600,
86970+ .proc_handler = &proc_dointvec,
86971+ },
86972+#endif
86973+#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
86974+ {
86975+ .procname = "socket_client",
86976+ .data = &grsec_enable_socket_client,
86977+ .maxlen = sizeof(int),
86978+ .mode = 0600,
86979+ .proc_handler = &proc_dointvec,
86980+ },
86981+ {
86982+ .procname = "socket_client_gid",
86983+ .data = &grsec_socket_client_gid,
86984+ .maxlen = sizeof(int),
86985+ .mode = 0600,
86986+ .proc_handler = &proc_dointvec,
86987+ },
86988+#endif
86989+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
86990+ {
86991+ .procname = "socket_server",
86992+ .data = &grsec_enable_socket_server,
86993+ .maxlen = sizeof(int),
86994+ .mode = 0600,
86995+ .proc_handler = &proc_dointvec,
86996+ },
86997+ {
86998+ .procname = "socket_server_gid",
86999+ .data = &grsec_socket_server_gid,
87000+ .maxlen = sizeof(int),
87001+ .mode = 0600,
87002+ .proc_handler = &proc_dointvec,
87003+ },
87004+#endif
87005+#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
87006+ {
87007+ .procname = "audit_group",
87008+ .data = &grsec_enable_group,
87009+ .maxlen = sizeof(int),
87010+ .mode = 0600,
87011+ .proc_handler = &proc_dointvec,
87012+ },
87013+ {
87014+ .procname = "audit_gid",
87015+ .data = &grsec_audit_gid,
87016+ .maxlen = sizeof(int),
87017+ .mode = 0600,
87018+ .proc_handler = &proc_dointvec,
87019+ },
87020+#endif
87021+#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
87022+ {
87023+ .procname = "audit_chdir",
87024+ .data = &grsec_enable_chdir,
87025+ .maxlen = sizeof(int),
87026+ .mode = 0600,
87027+ .proc_handler = &proc_dointvec,
87028+ },
87029+#endif
87030+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
87031+ {
87032+ .procname = "audit_mount",
87033+ .data = &grsec_enable_mount,
87034+ .maxlen = sizeof(int),
87035+ .mode = 0600,
87036+ .proc_handler = &proc_dointvec,
87037+ },
87038+#endif
87039+#ifdef CONFIG_GRKERNSEC_DMESG
87040+ {
87041+ .procname = "dmesg",
87042+ .data = &grsec_enable_dmesg,
87043+ .maxlen = sizeof(int),
87044+ .mode = 0600,
87045+ .proc_handler = &proc_dointvec,
87046+ },
87047+#endif
87048+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
87049+ {
87050+ .procname = "chroot_findtask",
87051+ .data = &grsec_enable_chroot_findtask,
87052+ .maxlen = sizeof(int),
87053+ .mode = 0600,
87054+ .proc_handler = &proc_dointvec,
87055+ },
87056+#endif
87057+#ifdef CONFIG_GRKERNSEC_RESLOG
87058+ {
87059+ .procname = "resource_logging",
87060+ .data = &grsec_resource_logging,
87061+ .maxlen = sizeof(int),
87062+ .mode = 0600,
87063+ .proc_handler = &proc_dointvec,
87064+ },
87065+#endif
87066+#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
87067+ {
87068+ .procname = "audit_ptrace",
87069+ .data = &grsec_enable_audit_ptrace,
87070+ .maxlen = sizeof(int),
87071+ .mode = 0600,
87072+ .proc_handler = &proc_dointvec,
87073+ },
87074+#endif
87075+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
87076+ {
87077+ .procname = "harden_ptrace",
87078+ .data = &grsec_enable_harden_ptrace,
87079+ .maxlen = sizeof(int),
87080+ .mode = 0600,
87081+ .proc_handler = &proc_dointvec,
87082+ },
87083+#endif
87084+#ifdef CONFIG_GRKERNSEC_HARDEN_IPC
87085+ {
87086+ .procname = "harden_ipc",
87087+ .data = &grsec_enable_harden_ipc,
87088+ .maxlen = sizeof(int),
87089+ .mode = 0600,
87090+ .proc_handler = &proc_dointvec,
87091+ },
87092+#endif
87093+ {
87094+ .procname = "grsec_lock",
87095+ .data = &grsec_lock,
87096+ .maxlen = sizeof(int),
87097+ .mode = 0600,
87098+ .proc_handler = &proc_dointvec,
87099+ },
87100+#endif
87101+#ifdef CONFIG_GRKERNSEC_ROFS
87102+ {
87103+ .procname = "romount_protect",
87104+ .data = &grsec_enable_rofs,
87105+ .maxlen = sizeof(int),
87106+ .mode = 0600,
87107+ .proc_handler = &proc_dointvec_minmax,
87108+ .extra1 = &one,
87109+ .extra2 = &one,
87110+ },
87111+#endif
87112+#if defined(CONFIG_GRKERNSEC_DENYUSB) && !defined(CONFIG_GRKERNSEC_DENYUSB_FORCE)
87113+ {
87114+ .procname = "deny_new_usb",
87115+ .data = &grsec_deny_new_usb,
87116+ .maxlen = sizeof(int),
87117+ .mode = 0600,
87118+ .proc_handler = &proc_dointvec,
87119+ },
87120+#endif
87121+ { }
87122+};
87123+#endif
87124diff --git a/grsecurity/grsec_time.c b/grsecurity/grsec_time.c
87125new file mode 100644
87126index 0000000..61b514e
87127--- /dev/null
87128+++ b/grsecurity/grsec_time.c
87129@@ -0,0 +1,16 @@
87130+#include <linux/kernel.h>
87131+#include <linux/sched.h>
87132+#include <linux/grinternal.h>
87133+#include <linux/module.h>
87134+
87135+void
87136+gr_log_timechange(void)
87137+{
87138+#ifdef CONFIG_GRKERNSEC_TIME
87139+ if (grsec_enable_time)
87140+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_TIME_MSG);
87141+#endif
87142+ return;
87143+}
87144+
87145+EXPORT_SYMBOL_GPL(gr_log_timechange);
87146diff --git a/grsecurity/grsec_tpe.c b/grsecurity/grsec_tpe.c
87147new file mode 100644
87148index 0000000..d1953de
87149--- /dev/null
87150+++ b/grsecurity/grsec_tpe.c
87151@@ -0,0 +1,78 @@
87152+#include <linux/kernel.h>
87153+#include <linux/sched.h>
87154+#include <linux/file.h>
87155+#include <linux/fs.h>
87156+#include <linux/grinternal.h>
87157+
87158+extern int gr_acl_tpe_check(void);
87159+
87160+int
87161+gr_tpe_allow(const struct file *file)
87162+{
87163+#ifdef CONFIG_GRKERNSEC
87164+ struct inode *inode = file->f_path.dentry->d_parent->d_inode;
87165+ struct inode *file_inode = file->f_path.dentry->d_inode;
87166+ const struct cred *cred = current_cred();
87167+ char *msg = NULL;
87168+ char *msg2 = NULL;
87169+
87170+ // never restrict root
87171+ if (gr_is_global_root(cred->uid))
87172+ return 1;
87173+
87174+ if (grsec_enable_tpe) {
87175+#ifdef CONFIG_GRKERNSEC_TPE_INVERT
87176+ if (grsec_enable_tpe_invert && !in_group_p(grsec_tpe_gid))
87177+ msg = "not being in trusted group";
87178+ else if (!grsec_enable_tpe_invert && in_group_p(grsec_tpe_gid))
87179+ msg = "being in untrusted group";
87180+#else
87181+ if (in_group_p(grsec_tpe_gid))
87182+ msg = "being in untrusted group";
87183+#endif
87184+ }
87185+ if (!msg && gr_acl_tpe_check())
87186+ msg = "being in untrusted role";
87187+
87188+ // not in any affected group/role
87189+ if (!msg)
87190+ goto next_check;
87191+
87192+ if (gr_is_global_nonroot(inode->i_uid))
87193+ msg2 = "file in non-root-owned directory";
87194+ else if (inode->i_mode & S_IWOTH)
87195+ msg2 = "file in world-writable directory";
87196+ else if (inode->i_mode & S_IWGRP)
87197+ msg2 = "file in group-writable directory";
87198+ else if (file_inode->i_mode & S_IWOTH)
87199+ msg2 = "file is world-writable";
87200+
87201+ if (msg && msg2) {
87202+ char fullmsg[70] = {0};
87203+ snprintf(fullmsg, sizeof(fullmsg)-1, "%s and %s", msg, msg2);
87204+ gr_log_str_fs(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, fullmsg, file->f_path.dentry, file->f_path.mnt);
87205+ return 0;
87206+ }
87207+ msg = NULL;
87208+next_check:
87209+#ifdef CONFIG_GRKERNSEC_TPE_ALL
87210+ if (!grsec_enable_tpe || !grsec_enable_tpe_all)
87211+ return 1;
87212+
87213+ if (gr_is_global_nonroot(inode->i_uid) && !uid_eq(inode->i_uid, cred->uid))
87214+ msg = "directory not owned by user";
87215+ else if (inode->i_mode & S_IWOTH)
87216+ msg = "file in world-writable directory";
87217+ else if (inode->i_mode & S_IWGRP)
87218+ msg = "file in group-writable directory";
87219+ else if (file_inode->i_mode & S_IWOTH)
87220+ msg = "file is world-writable";
87221+
87222+ if (msg) {
87223+ gr_log_str_fs(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, msg, file->f_path.dentry, file->f_path.mnt);
87224+ return 0;
87225+ }
87226+#endif
87227+#endif
87228+ return 1;
87229+}
87230diff --git a/grsecurity/grsec_usb.c b/grsecurity/grsec_usb.c
87231new file mode 100644
87232index 0000000..ae02d8e
87233--- /dev/null
87234+++ b/grsecurity/grsec_usb.c
87235@@ -0,0 +1,15 @@
87236+#include <linux/kernel.h>
87237+#include <linux/grinternal.h>
87238+#include <linux/module.h>
87239+
87240+int gr_handle_new_usb(void)
87241+{
87242+#ifdef CONFIG_GRKERNSEC_DENYUSB
87243+ if (grsec_deny_new_usb) {
87244+ printk(KERN_ALERT "grsec: denied insert of new USB device\n");
87245+ return 1;
87246+ }
87247+#endif
87248+ return 0;
87249+}
87250+EXPORT_SYMBOL_GPL(gr_handle_new_usb);
87251diff --git a/grsecurity/grsum.c b/grsecurity/grsum.c
87252new file mode 100644
87253index 0000000..158b330
87254--- /dev/null
87255+++ b/grsecurity/grsum.c
87256@@ -0,0 +1,64 @@
87257+#include <linux/err.h>
87258+#include <linux/kernel.h>
87259+#include <linux/sched.h>
87260+#include <linux/mm.h>
87261+#include <linux/scatterlist.h>
87262+#include <linux/crypto.h>
87263+#include <linux/gracl.h>
87264+
87265+
87266+#if !defined(CONFIG_CRYPTO) || defined(CONFIG_CRYPTO_MODULE) || !defined(CONFIG_CRYPTO_SHA256) || defined(CONFIG_CRYPTO_SHA256_MODULE)
87267+#error "crypto and sha256 must be built into the kernel"
87268+#endif
87269+
87270+int
87271+chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum)
87272+{
87273+ struct crypto_hash *tfm;
87274+ struct hash_desc desc;
87275+ struct scatterlist sg[2];
87276+ unsigned char temp_sum[GR_SHA_LEN] __attribute__((aligned(__alignof__(unsigned long))));
87277+ unsigned long *tmpsumptr = (unsigned long *)temp_sum;
87278+ unsigned long *sumptr = (unsigned long *)sum;
87279+ int cryptres;
87280+ int retval = 1;
87281+ volatile int mismatched = 0;
87282+ volatile int dummy = 0;
87283+ unsigned int i;
87284+
87285+ tfm = crypto_alloc_hash("sha256", 0, CRYPTO_ALG_ASYNC);
87286+ if (IS_ERR(tfm)) {
87287+ /* should never happen, since sha256 should be built in */
87288+ memset(entry->pw, 0, GR_PW_LEN);
87289+ return 1;
87290+ }
87291+
87292+ sg_init_table(sg, 2);
87293+ sg_set_buf(&sg[0], salt, GR_SALT_LEN);
87294+ sg_set_buf(&sg[1], entry->pw, strlen(entry->pw));
87295+
87296+ desc.tfm = tfm;
87297+ desc.flags = 0;
87298+
87299+ cryptres = crypto_hash_digest(&desc, sg, GR_SALT_LEN + strlen(entry->pw),
87300+ temp_sum);
87301+
87302+ memset(entry->pw, 0, GR_PW_LEN);
87303+
87304+ if (cryptres)
87305+ goto out;
87306+
87307+ for (i = 0; i < GR_SHA_LEN/sizeof(tmpsumptr[0]); i++)
87308+ if (sumptr[i] != tmpsumptr[i])
87309+ mismatched = 1;
87310+ else
87311+ dummy = 1; // waste a cycle
87312+
87313+ if (!mismatched)
87314+ retval = dummy - 1;
87315+
87316+out:
87317+ crypto_free_hash(tfm);
87318+
87319+ return retval;
87320+}
87321diff --git a/include/asm-generic/4level-fixup.h b/include/asm-generic/4level-fixup.h
87322index 5bdab6b..9ae82fe 100644
87323--- a/include/asm-generic/4level-fixup.h
87324+++ b/include/asm-generic/4level-fixup.h
87325@@ -14,8 +14,10 @@
87326 #define pmd_alloc(mm, pud, address) \
87327 ((unlikely(pgd_none(*(pud))) && __pmd_alloc(mm, pud, address))? \
87328 NULL: pmd_offset(pud, address))
87329+#define pmd_alloc_kernel(mm, pud, address) pmd_alloc((mm), (pud), (address))
87330
87331 #define pud_alloc(mm, pgd, address) (pgd)
87332+#define pud_alloc_kernel(mm, pgd, address) pud_alloc((mm), (pgd), (address))
87333 #define pud_offset(pgd, start) (pgd)
87334 #define pud_none(pud) 0
87335 #define pud_bad(pud) 0
87336diff --git a/include/asm-generic/atomic-long.h b/include/asm-generic/atomic-long.h
87337index b7babf0..1e4b4f1 100644
87338--- a/include/asm-generic/atomic-long.h
87339+++ b/include/asm-generic/atomic-long.h
87340@@ -22,6 +22,12 @@
87341
87342 typedef atomic64_t atomic_long_t;
87343
87344+#ifdef CONFIG_PAX_REFCOUNT
87345+typedef atomic64_unchecked_t atomic_long_unchecked_t;
87346+#else
87347+typedef atomic64_t atomic_long_unchecked_t;
87348+#endif
87349+
87350 #define ATOMIC_LONG_INIT(i) ATOMIC64_INIT(i)
87351
87352 static inline long atomic_long_read(atomic_long_t *l)
87353@@ -31,6 +37,15 @@ static inline long atomic_long_read(atomic_long_t *l)
87354 return (long)atomic64_read(v);
87355 }
87356
87357+#ifdef CONFIG_PAX_REFCOUNT
87358+static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
87359+{
87360+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
87361+
87362+ return (long)atomic64_read_unchecked(v);
87363+}
87364+#endif
87365+
87366 static inline void atomic_long_set(atomic_long_t *l, long i)
87367 {
87368 atomic64_t *v = (atomic64_t *)l;
87369@@ -38,6 +53,15 @@ static inline void atomic_long_set(atomic_long_t *l, long i)
87370 atomic64_set(v, i);
87371 }
87372
87373+#ifdef CONFIG_PAX_REFCOUNT
87374+static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
87375+{
87376+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
87377+
87378+ atomic64_set_unchecked(v, i);
87379+}
87380+#endif
87381+
87382 static inline void atomic_long_inc(atomic_long_t *l)
87383 {
87384 atomic64_t *v = (atomic64_t *)l;
87385@@ -45,6 +69,15 @@ static inline void atomic_long_inc(atomic_long_t *l)
87386 atomic64_inc(v);
87387 }
87388
87389+#ifdef CONFIG_PAX_REFCOUNT
87390+static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
87391+{
87392+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
87393+
87394+ atomic64_inc_unchecked(v);
87395+}
87396+#endif
87397+
87398 static inline void atomic_long_dec(atomic_long_t *l)
87399 {
87400 atomic64_t *v = (atomic64_t *)l;
87401@@ -52,6 +85,15 @@ static inline void atomic_long_dec(atomic_long_t *l)
87402 atomic64_dec(v);
87403 }
87404
87405+#ifdef CONFIG_PAX_REFCOUNT
87406+static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
87407+{
87408+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
87409+
87410+ atomic64_dec_unchecked(v);
87411+}
87412+#endif
87413+
87414 static inline void atomic_long_add(long i, atomic_long_t *l)
87415 {
87416 atomic64_t *v = (atomic64_t *)l;
87417@@ -59,6 +101,15 @@ static inline void atomic_long_add(long i, atomic_long_t *l)
87418 atomic64_add(i, v);
87419 }
87420
87421+#ifdef CONFIG_PAX_REFCOUNT
87422+static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
87423+{
87424+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
87425+
87426+ atomic64_add_unchecked(i, v);
87427+}
87428+#endif
87429+
87430 static inline void atomic_long_sub(long i, atomic_long_t *l)
87431 {
87432 atomic64_t *v = (atomic64_t *)l;
87433@@ -66,6 +117,15 @@ static inline void atomic_long_sub(long i, atomic_long_t *l)
87434 atomic64_sub(i, v);
87435 }
87436
87437+#ifdef CONFIG_PAX_REFCOUNT
87438+static inline void atomic_long_sub_unchecked(long i, atomic_long_unchecked_t *l)
87439+{
87440+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
87441+
87442+ atomic64_sub_unchecked(i, v);
87443+}
87444+#endif
87445+
87446 static inline int atomic_long_sub_and_test(long i, atomic_long_t *l)
87447 {
87448 atomic64_t *v = (atomic64_t *)l;
87449@@ -94,13 +154,22 @@ static inline int atomic_long_add_negative(long i, atomic_long_t *l)
87450 return atomic64_add_negative(i, v);
87451 }
87452
87453-static inline long atomic_long_add_return(long i, atomic_long_t *l)
87454+static inline long __intentional_overflow(-1) atomic_long_add_return(long i, atomic_long_t *l)
87455 {
87456 atomic64_t *v = (atomic64_t *)l;
87457
87458 return (long)atomic64_add_return(i, v);
87459 }
87460
87461+#ifdef CONFIG_PAX_REFCOUNT
87462+static inline long atomic_long_add_return_unchecked(long i, atomic_long_unchecked_t *l)
87463+{
87464+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
87465+
87466+ return (long)atomic64_add_return_unchecked(i, v);
87467+}
87468+#endif
87469+
87470 static inline long atomic_long_sub_return(long i, atomic_long_t *l)
87471 {
87472 atomic64_t *v = (atomic64_t *)l;
87473@@ -115,6 +184,15 @@ static inline long atomic_long_inc_return(atomic_long_t *l)
87474 return (long)atomic64_inc_return(v);
87475 }
87476
87477+#ifdef CONFIG_PAX_REFCOUNT
87478+static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
87479+{
87480+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
87481+
87482+ return (long)atomic64_inc_return_unchecked(v);
87483+}
87484+#endif
87485+
87486 static inline long atomic_long_dec_return(atomic_long_t *l)
87487 {
87488 atomic64_t *v = (atomic64_t *)l;
87489@@ -140,6 +218,12 @@ static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u)
87490
87491 typedef atomic_t atomic_long_t;
87492
87493+#ifdef CONFIG_PAX_REFCOUNT
87494+typedef atomic_unchecked_t atomic_long_unchecked_t;
87495+#else
87496+typedef atomic_t atomic_long_unchecked_t;
87497+#endif
87498+
87499 #define ATOMIC_LONG_INIT(i) ATOMIC_INIT(i)
87500 static inline long atomic_long_read(atomic_long_t *l)
87501 {
87502@@ -148,6 +232,15 @@ static inline long atomic_long_read(atomic_long_t *l)
87503 return (long)atomic_read(v);
87504 }
87505
87506+#ifdef CONFIG_PAX_REFCOUNT
87507+static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
87508+{
87509+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
87510+
87511+ return (long)atomic_read_unchecked(v);
87512+}
87513+#endif
87514+
87515 static inline void atomic_long_set(atomic_long_t *l, long i)
87516 {
87517 atomic_t *v = (atomic_t *)l;
87518@@ -155,6 +248,15 @@ static inline void atomic_long_set(atomic_long_t *l, long i)
87519 atomic_set(v, i);
87520 }
87521
87522+#ifdef CONFIG_PAX_REFCOUNT
87523+static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
87524+{
87525+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
87526+
87527+ atomic_set_unchecked(v, i);
87528+}
87529+#endif
87530+
87531 static inline void atomic_long_inc(atomic_long_t *l)
87532 {
87533 atomic_t *v = (atomic_t *)l;
87534@@ -162,6 +264,15 @@ static inline void atomic_long_inc(atomic_long_t *l)
87535 atomic_inc(v);
87536 }
87537
87538+#ifdef CONFIG_PAX_REFCOUNT
87539+static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
87540+{
87541+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
87542+
87543+ atomic_inc_unchecked(v);
87544+}
87545+#endif
87546+
87547 static inline void atomic_long_dec(atomic_long_t *l)
87548 {
87549 atomic_t *v = (atomic_t *)l;
87550@@ -169,6 +280,15 @@ static inline void atomic_long_dec(atomic_long_t *l)
87551 atomic_dec(v);
87552 }
87553
87554+#ifdef CONFIG_PAX_REFCOUNT
87555+static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
87556+{
87557+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
87558+
87559+ atomic_dec_unchecked(v);
87560+}
87561+#endif
87562+
87563 static inline void atomic_long_add(long i, atomic_long_t *l)
87564 {
87565 atomic_t *v = (atomic_t *)l;
87566@@ -176,6 +296,15 @@ static inline void atomic_long_add(long i, atomic_long_t *l)
87567 atomic_add(i, v);
87568 }
87569
87570+#ifdef CONFIG_PAX_REFCOUNT
87571+static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
87572+{
87573+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
87574+
87575+ atomic_add_unchecked(i, v);
87576+}
87577+#endif
87578+
87579 static inline void atomic_long_sub(long i, atomic_long_t *l)
87580 {
87581 atomic_t *v = (atomic_t *)l;
87582@@ -183,6 +312,15 @@ static inline void atomic_long_sub(long i, atomic_long_t *l)
87583 atomic_sub(i, v);
87584 }
87585
87586+#ifdef CONFIG_PAX_REFCOUNT
87587+static inline void atomic_long_sub_unchecked(long i, atomic_long_unchecked_t *l)
87588+{
87589+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
87590+
87591+ atomic_sub_unchecked(i, v);
87592+}
87593+#endif
87594+
87595 static inline int atomic_long_sub_and_test(long i, atomic_long_t *l)
87596 {
87597 atomic_t *v = (atomic_t *)l;
87598@@ -211,13 +349,23 @@ static inline int atomic_long_add_negative(long i, atomic_long_t *l)
87599 return atomic_add_negative(i, v);
87600 }
87601
87602-static inline long atomic_long_add_return(long i, atomic_long_t *l)
87603+static inline long __intentional_overflow(-1) atomic_long_add_return(long i, atomic_long_t *l)
87604 {
87605 atomic_t *v = (atomic_t *)l;
87606
87607 return (long)atomic_add_return(i, v);
87608 }
87609
87610+#ifdef CONFIG_PAX_REFCOUNT
87611+static inline long atomic_long_add_return_unchecked(long i, atomic_long_unchecked_t *l)
87612+{
87613+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
87614+
87615+ return (long)atomic_add_return_unchecked(i, v);
87616+}
87617+
87618+#endif
87619+
87620 static inline long atomic_long_sub_return(long i, atomic_long_t *l)
87621 {
87622 atomic_t *v = (atomic_t *)l;
87623@@ -232,6 +380,15 @@ static inline long atomic_long_inc_return(atomic_long_t *l)
87624 return (long)atomic_inc_return(v);
87625 }
87626
87627+#ifdef CONFIG_PAX_REFCOUNT
87628+static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
87629+{
87630+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
87631+
87632+ return (long)atomic_inc_return_unchecked(v);
87633+}
87634+#endif
87635+
87636 static inline long atomic_long_dec_return(atomic_long_t *l)
87637 {
87638 atomic_t *v = (atomic_t *)l;
87639@@ -255,4 +412,57 @@ static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u)
87640
87641 #endif /* BITS_PER_LONG == 64 */
87642
87643+#ifdef CONFIG_PAX_REFCOUNT
87644+static inline void pax_refcount_needs_these_functions(void)
87645+{
87646+ atomic_read_unchecked((atomic_unchecked_t *)NULL);
87647+ atomic_set_unchecked((atomic_unchecked_t *)NULL, 0);
87648+ atomic_add_unchecked(0, (atomic_unchecked_t *)NULL);
87649+ atomic_sub_unchecked(0, (atomic_unchecked_t *)NULL);
87650+ atomic_inc_unchecked((atomic_unchecked_t *)NULL);
87651+ (void)atomic_inc_and_test_unchecked((atomic_unchecked_t *)NULL);
87652+ atomic_inc_return_unchecked((atomic_unchecked_t *)NULL);
87653+ atomic_add_return_unchecked(0, (atomic_unchecked_t *)NULL);
87654+ atomic_dec_unchecked((atomic_unchecked_t *)NULL);
87655+ atomic_cmpxchg_unchecked((atomic_unchecked_t *)NULL, 0, 0);
87656+ (void)atomic_xchg_unchecked((atomic_unchecked_t *)NULL, 0);
87657+#ifdef CONFIG_X86
87658+ atomic_clear_mask_unchecked(0, NULL);
87659+ atomic_set_mask_unchecked(0, NULL);
87660+#endif
87661+
87662+ atomic_long_read_unchecked((atomic_long_unchecked_t *)NULL);
87663+ atomic_long_set_unchecked((atomic_long_unchecked_t *)NULL, 0);
87664+ atomic_long_add_unchecked(0, (atomic_long_unchecked_t *)NULL);
87665+ atomic_long_sub_unchecked(0, (atomic_long_unchecked_t *)NULL);
87666+ atomic_long_inc_unchecked((atomic_long_unchecked_t *)NULL);
87667+ atomic_long_add_return_unchecked(0, (atomic_long_unchecked_t *)NULL);
87668+ atomic_long_inc_return_unchecked((atomic_long_unchecked_t *)NULL);
87669+ atomic_long_dec_unchecked((atomic_long_unchecked_t *)NULL);
87670+}
87671+#else
87672+#define atomic_read_unchecked(v) atomic_read(v)
87673+#define atomic_set_unchecked(v, i) atomic_set((v), (i))
87674+#define atomic_add_unchecked(i, v) atomic_add((i), (v))
87675+#define atomic_sub_unchecked(i, v) atomic_sub((i), (v))
87676+#define atomic_inc_unchecked(v) atomic_inc(v)
87677+#define atomic_inc_and_test_unchecked(v) atomic_inc_and_test(v)
87678+#define atomic_inc_return_unchecked(v) atomic_inc_return(v)
87679+#define atomic_add_return_unchecked(i, v) atomic_add_return((i), (v))
87680+#define atomic_dec_unchecked(v) atomic_dec(v)
87681+#define atomic_cmpxchg_unchecked(v, o, n) atomic_cmpxchg((v), (o), (n))
87682+#define atomic_xchg_unchecked(v, i) atomic_xchg((v), (i))
87683+#define atomic_clear_mask_unchecked(mask, v) atomic_clear_mask((mask), (v))
87684+#define atomic_set_mask_unchecked(mask, v) atomic_set_mask((mask), (v))
87685+
87686+#define atomic_long_read_unchecked(v) atomic_long_read(v)
87687+#define atomic_long_set_unchecked(v, i) atomic_long_set((v), (i))
87688+#define atomic_long_add_unchecked(i, v) atomic_long_add((i), (v))
87689+#define atomic_long_sub_unchecked(i, v) atomic_long_sub((i), (v))
87690+#define atomic_long_inc_unchecked(v) atomic_long_inc(v)
87691+#define atomic_long_add_return_unchecked(i, v) atomic_long_add_return((i), (v))
87692+#define atomic_long_inc_return_unchecked(v) atomic_long_inc_return(v)
87693+#define atomic_long_dec_unchecked(v) atomic_long_dec(v)
87694+#endif
87695+
87696 #endif /* _ASM_GENERIC_ATOMIC_LONG_H */
87697diff --git a/include/asm-generic/atomic64.h b/include/asm-generic/atomic64.h
87698index 30ad9c8..c70c170 100644
87699--- a/include/asm-generic/atomic64.h
87700+++ b/include/asm-generic/atomic64.h
87701@@ -16,6 +16,8 @@ typedef struct {
87702 long long counter;
87703 } atomic64_t;
87704
87705+typedef atomic64_t atomic64_unchecked_t;
87706+
87707 #define ATOMIC64_INIT(i) { (i) }
87708
87709 extern long long atomic64_read(const atomic64_t *v);
87710@@ -51,4 +53,14 @@ extern int atomic64_add_unless(atomic64_t *v, long long a, long long u);
87711 #define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0)
87712 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1LL, 0LL)
87713
87714+#define atomic64_read_unchecked(v) atomic64_read(v)
87715+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
87716+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
87717+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
87718+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
87719+#define atomic64_inc_unchecked(v) atomic64_inc(v)
87720+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
87721+#define atomic64_dec_unchecked(v) atomic64_dec(v)
87722+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
87723+
87724 #endif /* _ASM_GENERIC_ATOMIC64_H */
87725diff --git a/include/asm-generic/barrier.h b/include/asm-generic/barrier.h
87726index f5c40b0..e902f9d 100644
87727--- a/include/asm-generic/barrier.h
87728+++ b/include/asm-generic/barrier.h
87729@@ -82,7 +82,7 @@
87730 do { \
87731 compiletime_assert_atomic_type(*p); \
87732 smp_mb(); \
87733- ACCESS_ONCE(*p) = (v); \
87734+ ACCESS_ONCE_RW(*p) = (v); \
87735 } while (0)
87736
87737 #define smp_load_acquire(p) \
87738diff --git a/include/asm-generic/bitops/__fls.h b/include/asm-generic/bitops/__fls.h
87739index a60a7cc..0fe12f2 100644
87740--- a/include/asm-generic/bitops/__fls.h
87741+++ b/include/asm-generic/bitops/__fls.h
87742@@ -9,7 +9,7 @@
87743 *
87744 * Undefined if no set bit exists, so code should check against 0 first.
87745 */
87746-static __always_inline unsigned long __fls(unsigned long word)
87747+static __always_inline unsigned long __intentional_overflow(-1) __fls(unsigned long word)
87748 {
87749 int num = BITS_PER_LONG - 1;
87750
87751diff --git a/include/asm-generic/bitops/fls.h b/include/asm-generic/bitops/fls.h
87752index 0576d1f..dad6c71 100644
87753--- a/include/asm-generic/bitops/fls.h
87754+++ b/include/asm-generic/bitops/fls.h
87755@@ -9,7 +9,7 @@
87756 * Note fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32.
87757 */
87758
87759-static __always_inline int fls(int x)
87760+static __always_inline int __intentional_overflow(-1) fls(int x)
87761 {
87762 int r = 32;
87763
87764diff --git a/include/asm-generic/bitops/fls64.h b/include/asm-generic/bitops/fls64.h
87765index b097cf8..3d40e14 100644
87766--- a/include/asm-generic/bitops/fls64.h
87767+++ b/include/asm-generic/bitops/fls64.h
87768@@ -15,7 +15,7 @@
87769 * at position 64.
87770 */
87771 #if BITS_PER_LONG == 32
87772-static __always_inline int fls64(__u64 x)
87773+static __always_inline int __intentional_overflow(-1) fls64(__u64 x)
87774 {
87775 __u32 h = x >> 32;
87776 if (h)
87777@@ -23,7 +23,7 @@ static __always_inline int fls64(__u64 x)
87778 return fls(x);
87779 }
87780 #elif BITS_PER_LONG == 64
87781-static __always_inline int fls64(__u64 x)
87782+static __always_inline int __intentional_overflow(-1) fls64(__u64 x)
87783 {
87784 if (x == 0)
87785 return 0;
87786diff --git a/include/asm-generic/cache.h b/include/asm-generic/cache.h
87787index 1bfcfe5..e04c5c9 100644
87788--- a/include/asm-generic/cache.h
87789+++ b/include/asm-generic/cache.h
87790@@ -6,7 +6,7 @@
87791 * cache lines need to provide their own cache.h.
87792 */
87793
87794-#define L1_CACHE_SHIFT 5
87795-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
87796+#define L1_CACHE_SHIFT 5UL
87797+#define L1_CACHE_BYTES (1UL << L1_CACHE_SHIFT)
87798
87799 #endif /* __ASM_GENERIC_CACHE_H */
87800diff --git a/include/asm-generic/emergency-restart.h b/include/asm-generic/emergency-restart.h
87801index 0d68a1e..b74a761 100644
87802--- a/include/asm-generic/emergency-restart.h
87803+++ b/include/asm-generic/emergency-restart.h
87804@@ -1,7 +1,7 @@
87805 #ifndef _ASM_GENERIC_EMERGENCY_RESTART_H
87806 #define _ASM_GENERIC_EMERGENCY_RESTART_H
87807
87808-static inline void machine_emergency_restart(void)
87809+static inline __noreturn void machine_emergency_restart(void)
87810 {
87811 machine_restart(NULL);
87812 }
87813diff --git a/include/asm-generic/kmap_types.h b/include/asm-generic/kmap_types.h
87814index 90f99c7..00ce236 100644
87815--- a/include/asm-generic/kmap_types.h
87816+++ b/include/asm-generic/kmap_types.h
87817@@ -2,9 +2,9 @@
87818 #define _ASM_GENERIC_KMAP_TYPES_H
87819
87820 #ifdef __WITH_KM_FENCE
87821-# define KM_TYPE_NR 41
87822+# define KM_TYPE_NR 42
87823 #else
87824-# define KM_TYPE_NR 20
87825+# define KM_TYPE_NR 21
87826 #endif
87827
87828 #endif
87829diff --git a/include/asm-generic/local.h b/include/asm-generic/local.h
87830index 9ceb03b..62b0b8f 100644
87831--- a/include/asm-generic/local.h
87832+++ b/include/asm-generic/local.h
87833@@ -23,24 +23,37 @@ typedef struct
87834 atomic_long_t a;
87835 } local_t;
87836
87837+typedef struct {
87838+ atomic_long_unchecked_t a;
87839+} local_unchecked_t;
87840+
87841 #define LOCAL_INIT(i) { ATOMIC_LONG_INIT(i) }
87842
87843 #define local_read(l) atomic_long_read(&(l)->a)
87844+#define local_read_unchecked(l) atomic_long_read_unchecked(&(l)->a)
87845 #define local_set(l,i) atomic_long_set((&(l)->a),(i))
87846+#define local_set_unchecked(l,i) atomic_long_set_unchecked((&(l)->a),(i))
87847 #define local_inc(l) atomic_long_inc(&(l)->a)
87848+#define local_inc_unchecked(l) atomic_long_inc_unchecked(&(l)->a)
87849 #define local_dec(l) atomic_long_dec(&(l)->a)
87850+#define local_dec_unchecked(l) atomic_long_dec_unchecked(&(l)->a)
87851 #define local_add(i,l) atomic_long_add((i),(&(l)->a))
87852+#define local_add_unchecked(i,l) atomic_long_add_unchecked((i),(&(l)->a))
87853 #define local_sub(i,l) atomic_long_sub((i),(&(l)->a))
87854+#define local_sub_unchecked(i,l) atomic_long_sub_unchecked((i),(&(l)->a))
87855
87856 #define local_sub_and_test(i, l) atomic_long_sub_and_test((i), (&(l)->a))
87857 #define local_dec_and_test(l) atomic_long_dec_and_test(&(l)->a)
87858 #define local_inc_and_test(l) atomic_long_inc_and_test(&(l)->a)
87859 #define local_add_negative(i, l) atomic_long_add_negative((i), (&(l)->a))
87860 #define local_add_return(i, l) atomic_long_add_return((i), (&(l)->a))
87861+#define local_add_return_unchecked(i, l) atomic_long_add_return_unchecked((i), (&(l)->a))
87862 #define local_sub_return(i, l) atomic_long_sub_return((i), (&(l)->a))
87863 #define local_inc_return(l) atomic_long_inc_return(&(l)->a)
87864+#define local_dec_return(l) atomic_long_dec_return(&(l)->a)
87865
87866 #define local_cmpxchg(l, o, n) atomic_long_cmpxchg((&(l)->a), (o), (n))
87867+#define local_cmpxchg_unchecked(l, o, n) atomic_long_cmpxchg((&(l)->a), (o), (n))
87868 #define local_xchg(l, n) atomic_long_xchg((&(l)->a), (n))
87869 #define local_add_unless(l, _a, u) atomic_long_add_unless((&(l)->a), (_a), (u))
87870 #define local_inc_not_zero(l) atomic_long_inc_not_zero(&(l)->a)
87871diff --git a/include/asm-generic/pgtable-nopmd.h b/include/asm-generic/pgtable-nopmd.h
87872index 725612b..9cc513a 100644
87873--- a/include/asm-generic/pgtable-nopmd.h
87874+++ b/include/asm-generic/pgtable-nopmd.h
87875@@ -1,14 +1,19 @@
87876 #ifndef _PGTABLE_NOPMD_H
87877 #define _PGTABLE_NOPMD_H
87878
87879-#ifndef __ASSEMBLY__
87880-
87881 #include <asm-generic/pgtable-nopud.h>
87882
87883-struct mm_struct;
87884-
87885 #define __PAGETABLE_PMD_FOLDED
87886
87887+#define PMD_SHIFT PUD_SHIFT
87888+#define PTRS_PER_PMD 1
87889+#define PMD_SIZE (_AC(1,UL) << PMD_SHIFT)
87890+#define PMD_MASK (~(PMD_SIZE-1))
87891+
87892+#ifndef __ASSEMBLY__
87893+
87894+struct mm_struct;
87895+
87896 /*
87897 * Having the pmd type consist of a pud gets the size right, and allows
87898 * us to conceptually access the pud entry that this pmd is folded into
87899@@ -16,11 +21,6 @@ struct mm_struct;
87900 */
87901 typedef struct { pud_t pud; } pmd_t;
87902
87903-#define PMD_SHIFT PUD_SHIFT
87904-#define PTRS_PER_PMD 1
87905-#define PMD_SIZE (1UL << PMD_SHIFT)
87906-#define PMD_MASK (~(PMD_SIZE-1))
87907-
87908 /*
87909 * The "pud_xxx()" functions here are trivial for a folded two-level
87910 * setup: the pmd is never bad, and a pmd always exists (as it's folded
87911diff --git a/include/asm-generic/pgtable-nopud.h b/include/asm-generic/pgtable-nopud.h
87912index 810431d..0ec4804f 100644
87913--- a/include/asm-generic/pgtable-nopud.h
87914+++ b/include/asm-generic/pgtable-nopud.h
87915@@ -1,10 +1,15 @@
87916 #ifndef _PGTABLE_NOPUD_H
87917 #define _PGTABLE_NOPUD_H
87918
87919-#ifndef __ASSEMBLY__
87920-
87921 #define __PAGETABLE_PUD_FOLDED
87922
87923+#define PUD_SHIFT PGDIR_SHIFT
87924+#define PTRS_PER_PUD 1
87925+#define PUD_SIZE (_AC(1,UL) << PUD_SHIFT)
87926+#define PUD_MASK (~(PUD_SIZE-1))
87927+
87928+#ifndef __ASSEMBLY__
87929+
87930 /*
87931 * Having the pud type consist of a pgd gets the size right, and allows
87932 * us to conceptually access the pgd entry that this pud is folded into
87933@@ -12,11 +17,6 @@
87934 */
87935 typedef struct { pgd_t pgd; } pud_t;
87936
87937-#define PUD_SHIFT PGDIR_SHIFT
87938-#define PTRS_PER_PUD 1
87939-#define PUD_SIZE (1UL << PUD_SHIFT)
87940-#define PUD_MASK (~(PUD_SIZE-1))
87941-
87942 /*
87943 * The "pgd_xxx()" functions here are trivial for a folded two-level
87944 * setup: the pud is never bad, and a pud always exists (as it's folded
87945@@ -29,6 +29,7 @@ static inline void pgd_clear(pgd_t *pgd) { }
87946 #define pud_ERROR(pud) (pgd_ERROR((pud).pgd))
87947
87948 #define pgd_populate(mm, pgd, pud) do { } while (0)
87949+#define pgd_populate_kernel(mm, pgd, pud) do { } while (0)
87950 /*
87951 * (puds are folded into pgds so this doesn't get actually called,
87952 * but the define is needed for a generic inline function.)
87953diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
87954index 4d46085..f4e92ef 100644
87955--- a/include/asm-generic/pgtable.h
87956+++ b/include/asm-generic/pgtable.h
87957@@ -689,6 +689,22 @@ static inline int pmd_protnone(pmd_t pmd)
87958 }
87959 #endif /* CONFIG_NUMA_BALANCING */
87960
87961+#ifndef __HAVE_ARCH_PAX_OPEN_KERNEL
87962+#ifdef CONFIG_PAX_KERNEXEC
87963+#error KERNEXEC requires pax_open_kernel
87964+#else
87965+static inline unsigned long pax_open_kernel(void) { return 0; }
87966+#endif
87967+#endif
87968+
87969+#ifndef __HAVE_ARCH_PAX_CLOSE_KERNEL
87970+#ifdef CONFIG_PAX_KERNEXEC
87971+#error KERNEXEC requires pax_close_kernel
87972+#else
87973+static inline unsigned long pax_close_kernel(void) { return 0; }
87974+#endif
87975+#endif
87976+
87977 #endif /* CONFIG_MMU */
87978
87979 #endif /* !__ASSEMBLY__ */
87980diff --git a/include/asm-generic/uaccess.h b/include/asm-generic/uaccess.h
87981index 72d8803..cb9749c 100644
87982--- a/include/asm-generic/uaccess.h
87983+++ b/include/asm-generic/uaccess.h
87984@@ -343,4 +343,20 @@ clear_user(void __user *to, unsigned long n)
87985 return __clear_user(to, n);
87986 }
87987
87988+#ifndef __HAVE_ARCH_PAX_OPEN_USERLAND
87989+#ifdef CONFIG_PAX_MEMORY_UDEREF
87990+#error UDEREF requires pax_open_userland
87991+#else
87992+static inline unsigned long pax_open_userland(void) { return 0; }
87993+#endif
87994+#endif
87995+
87996+#ifndef __HAVE_ARCH_PAX_CLOSE_USERLAND
87997+#ifdef CONFIG_PAX_MEMORY_UDEREF
87998+#error UDEREF requires pax_close_userland
87999+#else
88000+static inline unsigned long pax_close_userland(void) { return 0; }
88001+#endif
88002+#endif
88003+
88004 #endif /* __ASM_GENERIC_UACCESS_H */
88005diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
88006index ac78910..775a306 100644
88007--- a/include/asm-generic/vmlinux.lds.h
88008+++ b/include/asm-generic/vmlinux.lds.h
88009@@ -234,6 +234,7 @@
88010 .rodata : AT(ADDR(.rodata) - LOAD_OFFSET) { \
88011 VMLINUX_SYMBOL(__start_rodata) = .; \
88012 *(.rodata) *(.rodata.*) \
88013+ *(.data..read_only) \
88014 *(__vermagic) /* Kernel version magic */ \
88015 . = ALIGN(8); \
88016 VMLINUX_SYMBOL(__start___tracepoints_ptrs) = .; \
88017@@ -727,17 +728,18 @@
88018 * section in the linker script will go there too. @phdr should have
88019 * a leading colon.
88020 *
88021- * Note that this macros defines __per_cpu_load as an absolute symbol.
88022+ * Note that this macros defines per_cpu_load as an absolute symbol.
88023 * If there is no need to put the percpu section at a predetermined
88024 * address, use PERCPU_SECTION.
88025 */
88026 #define PERCPU_VADDR(cacheline, vaddr, phdr) \
88027- VMLINUX_SYMBOL(__per_cpu_load) = .; \
88028- .data..percpu vaddr : AT(VMLINUX_SYMBOL(__per_cpu_load) \
88029+ per_cpu_load = .; \
88030+ .data..percpu vaddr : AT(VMLINUX_SYMBOL(per_cpu_load) \
88031 - LOAD_OFFSET) { \
88032+ VMLINUX_SYMBOL(__per_cpu_load) = . + per_cpu_load; \
88033 PERCPU_INPUT(cacheline) \
88034 } phdr \
88035- . = VMLINUX_SYMBOL(__per_cpu_load) + SIZEOF(.data..percpu);
88036+ . = VMLINUX_SYMBOL(per_cpu_load) + SIZEOF(.data..percpu);
88037
88038 /**
88039 * PERCPU_SECTION - define output section for percpu area, simple version
88040diff --git a/include/crypto/algapi.h b/include/crypto/algapi.h
88041index 623a59c..1e79ab9 100644
88042--- a/include/crypto/algapi.h
88043+++ b/include/crypto/algapi.h
88044@@ -34,7 +34,7 @@ struct crypto_type {
88045 unsigned int maskclear;
88046 unsigned int maskset;
88047 unsigned int tfmsize;
88048-};
88049+} __do_const;
88050
88051 struct crypto_instance {
88052 struct crypto_alg alg;
88053diff --git a/include/drm/drmP.h b/include/drm/drmP.h
88054index e928625..ff97886 100644
88055--- a/include/drm/drmP.h
88056+++ b/include/drm/drmP.h
88057@@ -59,6 +59,7 @@
88058
88059 #include <asm/mman.h>
88060 #include <asm/pgalloc.h>
88061+#include <asm/local.h>
88062 #include <asm/uaccess.h>
88063
88064 #include <uapi/drm/drm.h>
88065@@ -133,17 +134,18 @@ void drm_err(const char *format, ...);
88066 /*@{*/
88067
88068 /* driver capabilities and requirements mask */
88069-#define DRIVER_USE_AGP 0x1
88070-#define DRIVER_PCI_DMA 0x8
88071-#define DRIVER_SG 0x10
88072-#define DRIVER_HAVE_DMA 0x20
88073-#define DRIVER_HAVE_IRQ 0x40
88074-#define DRIVER_IRQ_SHARED 0x80
88075-#define DRIVER_GEM 0x1000
88076-#define DRIVER_MODESET 0x2000
88077-#define DRIVER_PRIME 0x4000
88078-#define DRIVER_RENDER 0x8000
88079-#define DRIVER_ATOMIC 0x10000
88080+#define DRIVER_USE_AGP 0x1
88081+#define DRIVER_PCI_DMA 0x8
88082+#define DRIVER_SG 0x10
88083+#define DRIVER_HAVE_DMA 0x20
88084+#define DRIVER_HAVE_IRQ 0x40
88085+#define DRIVER_IRQ_SHARED 0x80
88086+#define DRIVER_GEM 0x1000
88087+#define DRIVER_MODESET 0x2000
88088+#define DRIVER_PRIME 0x4000
88089+#define DRIVER_RENDER 0x8000
88090+#define DRIVER_ATOMIC 0x10000
88091+#define DRIVER_KMS_LEGACY_CONTEXT 0x20000
88092
88093 /***********************************************************************/
88094 /** \name Macros to make printk easier */
88095@@ -224,10 +226,12 @@ void drm_err(const char *format, ...);
88096 * \param cmd command.
88097 * \param arg argument.
88098 */
88099-typedef int drm_ioctl_t(struct drm_device *dev, void *data,
88100+typedef int (* const drm_ioctl_t)(struct drm_device *dev, void *data,
88101+ struct drm_file *file_priv);
88102+typedef int (* drm_ioctl_no_const_t)(struct drm_device *dev, void *data,
88103 struct drm_file *file_priv);
88104
88105-typedef int drm_ioctl_compat_t(struct file *filp, unsigned int cmd,
88106+typedef int (* const drm_ioctl_compat_t)(struct file *filp, unsigned int cmd,
88107 unsigned long arg);
88108
88109 #define DRM_IOCTL_NR(n) _IOC_NR(n)
88110@@ -243,10 +247,10 @@ typedef int drm_ioctl_compat_t(struct file *filp, unsigned int cmd,
88111 struct drm_ioctl_desc {
88112 unsigned int cmd;
88113 int flags;
88114- drm_ioctl_t *func;
88115+ drm_ioctl_t func;
88116 unsigned int cmd_drv;
88117 const char *name;
88118-};
88119+} __do_const;
88120
88121 /**
88122 * Creates a driver or general drm_ioctl_desc array entry for the given
88123@@ -632,7 +636,8 @@ struct drm_info_list {
88124 int (*show)(struct seq_file*, void*); /** show callback */
88125 u32 driver_features; /**< Required driver features for this entry */
88126 void *data;
88127-};
88128+} __do_const;
88129+typedef struct drm_info_list __no_const drm_info_list_no_const;
88130
88131 /**
88132 * debugfs node structure. This structure represents a debugfs file.
88133@@ -716,7 +721,7 @@ struct drm_device {
88134
88135 /** \name Usage Counters */
88136 /*@{ */
88137- int open_count; /**< Outstanding files open, protected by drm_global_mutex. */
88138+ local_t open_count; /**< Outstanding files open, protected by drm_global_mutex. */
88139 spinlock_t buf_lock; /**< For drm_device::buf_use and a few other things. */
88140 int buf_use; /**< Buffers in use -- cannot alloc */
88141 atomic_t buf_alloc; /**< Buffer allocation in progress */
88142diff --git a/include/drm/drm_crtc_helper.h b/include/drm/drm_crtc_helper.h
88143index c250a22..59d2094 100644
88144--- a/include/drm/drm_crtc_helper.h
88145+++ b/include/drm/drm_crtc_helper.h
88146@@ -160,7 +160,7 @@ struct drm_encoder_helper_funcs {
88147 int (*atomic_check)(struct drm_encoder *encoder,
88148 struct drm_crtc_state *crtc_state,
88149 struct drm_connector_state *conn_state);
88150-};
88151+} __no_const;
88152
88153 /**
88154 * struct drm_connector_helper_funcs - helper operations for connectors
88155diff --git a/include/drm/i915_pciids.h b/include/drm/i915_pciids.h
88156index d016dc5..3951fe0 100644
88157--- a/include/drm/i915_pciids.h
88158+++ b/include/drm/i915_pciids.h
88159@@ -37,7 +37,7 @@
88160 */
88161 #define INTEL_VGA_DEVICE(id, info) { \
88162 0x8086, id, \
88163- ~0, ~0, \
88164+ PCI_ANY_ID, PCI_ANY_ID, \
88165 0x030000, 0xff0000, \
88166 (unsigned long) info }
88167
88168diff --git a/include/drm/ttm/ttm_memory.h b/include/drm/ttm/ttm_memory.h
88169index 72dcbe8..8db58d7 100644
88170--- a/include/drm/ttm/ttm_memory.h
88171+++ b/include/drm/ttm/ttm_memory.h
88172@@ -48,7 +48,7 @@
88173
88174 struct ttm_mem_shrink {
88175 int (*do_shrink) (struct ttm_mem_shrink *);
88176-};
88177+} __no_const;
88178
88179 /**
88180 * struct ttm_mem_global - Global memory accounting structure.
88181diff --git a/include/drm/ttm/ttm_page_alloc.h b/include/drm/ttm/ttm_page_alloc.h
88182index 49a8284..9643967 100644
88183--- a/include/drm/ttm/ttm_page_alloc.h
88184+++ b/include/drm/ttm/ttm_page_alloc.h
88185@@ -80,6 +80,7 @@ void ttm_dma_page_alloc_fini(void);
88186 */
88187 extern int ttm_dma_page_alloc_debugfs(struct seq_file *m, void *data);
88188
88189+struct device;
88190 extern int ttm_dma_populate(struct ttm_dma_tt *ttm_dma, struct device *dev);
88191 extern void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma, struct device *dev);
88192
88193diff --git a/include/keys/asymmetric-subtype.h b/include/keys/asymmetric-subtype.h
88194index 4b840e8..155d235 100644
88195--- a/include/keys/asymmetric-subtype.h
88196+++ b/include/keys/asymmetric-subtype.h
88197@@ -37,7 +37,7 @@ struct asymmetric_key_subtype {
88198 /* Verify the signature on a key of this subtype (optional) */
88199 int (*verify_signature)(const struct key *key,
88200 const struct public_key_signature *sig);
88201-};
88202+} __do_const;
88203
88204 /**
88205 * asymmetric_key_subtype - Get the subtype from an asymmetric key
88206diff --git a/include/linux/atmdev.h b/include/linux/atmdev.h
88207index c1da539..1dcec55 100644
88208--- a/include/linux/atmdev.h
88209+++ b/include/linux/atmdev.h
88210@@ -28,7 +28,7 @@ struct compat_atm_iobuf {
88211 #endif
88212
88213 struct k_atm_aal_stats {
88214-#define __HANDLE_ITEM(i) atomic_t i
88215+#define __HANDLE_ITEM(i) atomic_unchecked_t i
88216 __AAL_STAT_ITEMS
88217 #undef __HANDLE_ITEM
88218 };
88219@@ -200,7 +200,7 @@ struct atmdev_ops { /* only send is required */
88220 int (*change_qos)(struct atm_vcc *vcc,struct atm_qos *qos,int flags);
88221 int (*proc_read)(struct atm_dev *dev,loff_t *pos,char *page);
88222 struct module *owner;
88223-};
88224+} __do_const ;
88225
88226 struct atmphy_ops {
88227 int (*start)(struct atm_dev *dev);
88228diff --git a/include/linux/atomic.h b/include/linux/atomic.h
88229index 5b08a85..60922fb 100644
88230--- a/include/linux/atomic.h
88231+++ b/include/linux/atomic.h
88232@@ -12,7 +12,7 @@
88233 * Atomically adds @a to @v, so long as @v was not already @u.
88234 * Returns non-zero if @v was not @u, and zero otherwise.
88235 */
88236-static inline int atomic_add_unless(atomic_t *v, int a, int u)
88237+static inline int __intentional_overflow(-1) atomic_add_unless(atomic_t *v, int a, int u)
88238 {
88239 return __atomic_add_unless(v, a, u) != u;
88240 }
88241diff --git a/include/linux/audit.h b/include/linux/audit.h
88242index c2e7e3a..8bfc0e1 100644
88243--- a/include/linux/audit.h
88244+++ b/include/linux/audit.h
88245@@ -223,7 +223,7 @@ static inline void audit_ptrace(struct task_struct *t)
88246 extern unsigned int audit_serial(void);
88247 extern int auditsc_get_stamp(struct audit_context *ctx,
88248 struct timespec *t, unsigned int *serial);
88249-extern int audit_set_loginuid(kuid_t loginuid);
88250+extern int __intentional_overflow(-1) audit_set_loginuid(kuid_t loginuid);
88251
88252 static inline kuid_t audit_get_loginuid(struct task_struct *tsk)
88253 {
88254diff --git a/include/linux/binfmts.h b/include/linux/binfmts.h
88255index 576e463..28fd926 100644
88256--- a/include/linux/binfmts.h
88257+++ b/include/linux/binfmts.h
88258@@ -44,7 +44,7 @@ struct linux_binprm {
88259 unsigned interp_flags;
88260 unsigned interp_data;
88261 unsigned long loader, exec;
88262-};
88263+} __randomize_layout;
88264
88265 #define BINPRM_FLAGS_ENFORCE_NONDUMP_BIT 0
88266 #define BINPRM_FLAGS_ENFORCE_NONDUMP (1 << BINPRM_FLAGS_ENFORCE_NONDUMP_BIT)
88267@@ -77,8 +77,10 @@ struct linux_binfmt {
88268 int (*load_binary)(struct linux_binprm *);
88269 int (*load_shlib)(struct file *);
88270 int (*core_dump)(struct coredump_params *cprm);
88271+ void (*handle_mprotect)(struct vm_area_struct *vma, unsigned long newflags);
88272+ void (*handle_mmap)(struct file *);
88273 unsigned long min_coredump; /* minimal dump size */
88274-};
88275+} __do_const __randomize_layout;
88276
88277 extern void __register_binfmt(struct linux_binfmt *fmt, int insert);
88278
88279diff --git a/include/linux/bitmap.h b/include/linux/bitmap.h
88280index dbfbf49..10be372 100644
88281--- a/include/linux/bitmap.h
88282+++ b/include/linux/bitmap.h
88283@@ -299,7 +299,7 @@ static inline int bitmap_full(const unsigned long *src, unsigned int nbits)
88284 return __bitmap_full(src, nbits);
88285 }
88286
88287-static inline int bitmap_weight(const unsigned long *src, unsigned int nbits)
88288+static inline int __intentional_overflow(-1) bitmap_weight(const unsigned long *src, unsigned int nbits)
88289 {
88290 if (small_const_nbits(nbits))
88291 return hweight_long(*src & BITMAP_LAST_WORD_MASK(nbits));
88292diff --git a/include/linux/bitops.h b/include/linux/bitops.h
88293index 5d858e0..336c1d9 100644
88294--- a/include/linux/bitops.h
88295+++ b/include/linux/bitops.h
88296@@ -105,7 +105,7 @@ static inline __u64 ror64(__u64 word, unsigned int shift)
88297 * @word: value to rotate
88298 * @shift: bits to roll
88299 */
88300-static inline __u32 rol32(__u32 word, unsigned int shift)
88301+static inline __u32 __intentional_overflow(-1) rol32(__u32 word, unsigned int shift)
88302 {
88303 return (word << shift) | (word >> (32 - shift));
88304 }
88305@@ -115,7 +115,7 @@ static inline __u32 rol32(__u32 word, unsigned int shift)
88306 * @word: value to rotate
88307 * @shift: bits to roll
88308 */
88309-static inline __u32 ror32(__u32 word, unsigned int shift)
88310+static inline __u32 __intentional_overflow(-1) ror32(__u32 word, unsigned int shift)
88311 {
88312 return (word >> shift) | (word << (32 - shift));
88313 }
88314@@ -171,7 +171,7 @@ static inline __s32 sign_extend32(__u32 value, int index)
88315 return (__s32)(value << shift) >> shift;
88316 }
88317
88318-static inline unsigned fls_long(unsigned long l)
88319+static inline unsigned __intentional_overflow(-1) fls_long(unsigned long l)
88320 {
88321 if (sizeof(l) == 4)
88322 return fls(l);
88323diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
88324index 7f9a516..8889453 100644
88325--- a/include/linux/blkdev.h
88326+++ b/include/linux/blkdev.h
88327@@ -1616,7 +1616,7 @@ struct block_device_operations {
88328 /* this callback is with swap_lock and sometimes page table lock held */
88329 void (*swap_slot_free_notify) (struct block_device *, unsigned long);
88330 struct module *owner;
88331-};
88332+} __do_const;
88333
88334 extern int __blkdev_driver_ioctl(struct block_device *, fmode_t, unsigned int,
88335 unsigned long);
88336diff --git a/include/linux/blktrace_api.h b/include/linux/blktrace_api.h
88337index afc1343..9735539 100644
88338--- a/include/linux/blktrace_api.h
88339+++ b/include/linux/blktrace_api.h
88340@@ -25,7 +25,7 @@ struct blk_trace {
88341 struct dentry *dropped_file;
88342 struct dentry *msg_file;
88343 struct list_head running_list;
88344- atomic_t dropped;
88345+ atomic_unchecked_t dropped;
88346 };
88347
88348 extern int blk_trace_ioctl(struct block_device *, unsigned, char __user *);
88349diff --git a/include/linux/cache.h b/include/linux/cache.h
88350index 17e7e82..1d7da26 100644
88351--- a/include/linux/cache.h
88352+++ b/include/linux/cache.h
88353@@ -16,6 +16,14 @@
88354 #define __read_mostly
88355 #endif
88356
88357+#ifndef __read_only
88358+#ifdef CONFIG_PAX_KERNEXEC
88359+#error KERNEXEC requires __read_only
88360+#else
88361+#define __read_only __read_mostly
88362+#endif
88363+#endif
88364+
88365 #ifndef ____cacheline_aligned
88366 #define ____cacheline_aligned __attribute__((__aligned__(SMP_CACHE_BYTES)))
88367 #endif
88368diff --git a/include/linux/capability.h b/include/linux/capability.h
88369index aa93e5e..985a1b0 100644
88370--- a/include/linux/capability.h
88371+++ b/include/linux/capability.h
88372@@ -214,9 +214,14 @@ extern bool has_ns_capability_noaudit(struct task_struct *t,
88373 extern bool capable(int cap);
88374 extern bool ns_capable(struct user_namespace *ns, int cap);
88375 extern bool capable_wrt_inode_uidgid(const struct inode *inode, int cap);
88376+extern bool capable_wrt_inode_uidgid_nolog(const struct inode *inode, int cap);
88377 extern bool file_ns_capable(const struct file *file, struct user_namespace *ns, int cap);
88378+extern bool capable_nolog(int cap);
88379+extern bool ns_capable_nolog(struct user_namespace *ns, int cap);
88380
88381 /* audit system wants to get cap info from files as well */
88382 extern int get_vfs_caps_from_disk(const struct dentry *dentry, struct cpu_vfs_cap_data *cpu_caps);
88383
88384+extern int is_privileged_binary(const struct dentry *dentry);
88385+
88386 #endif /* !_LINUX_CAPABILITY_H */
88387diff --git a/include/linux/cdrom.h b/include/linux/cdrom.h
88388index 8609d57..86e4d79 100644
88389--- a/include/linux/cdrom.h
88390+++ b/include/linux/cdrom.h
88391@@ -87,7 +87,6 @@ struct cdrom_device_ops {
88392
88393 /* driver specifications */
88394 const int capability; /* capability flags */
88395- int n_minors; /* number of active minor devices */
88396 /* handle uniform packets for scsi type devices (scsi,atapi) */
88397 int (*generic_packet) (struct cdrom_device_info *,
88398 struct packet_command *);
88399diff --git a/include/linux/cleancache.h b/include/linux/cleancache.h
88400index 4ce9056..86caac6 100644
88401--- a/include/linux/cleancache.h
88402+++ b/include/linux/cleancache.h
88403@@ -31,7 +31,7 @@ struct cleancache_ops {
88404 void (*invalidate_page)(int, struct cleancache_filekey, pgoff_t);
88405 void (*invalidate_inode)(int, struct cleancache_filekey);
88406 void (*invalidate_fs)(int);
88407-};
88408+} __no_const;
88409
88410 extern struct cleancache_ops *
88411 cleancache_register_ops(struct cleancache_ops *ops);
88412diff --git a/include/linux/clk-provider.h b/include/linux/clk-provider.h
88413index 5591ea7..61b77ce 100644
88414--- a/include/linux/clk-provider.h
88415+++ b/include/linux/clk-provider.h
88416@@ -195,6 +195,7 @@ struct clk_ops {
88417 void (*init)(struct clk_hw *hw);
88418 int (*debug_init)(struct clk_hw *hw, struct dentry *dentry);
88419 };
88420+typedef struct clk_ops __no_const clk_ops_no_const;
88421
88422 /**
88423 * struct clk_init_data - holds init data that's common to all clocks and is
88424diff --git a/include/linux/compat.h b/include/linux/compat.h
88425index ab25814..d1540d1 100644
88426--- a/include/linux/compat.h
88427+++ b/include/linux/compat.h
88428@@ -316,7 +316,7 @@ compat_sys_get_robust_list(int pid, compat_uptr_t __user *head_ptr,
88429 compat_size_t __user *len_ptr);
88430
88431 asmlinkage long compat_sys_ipc(u32, int, int, u32, compat_uptr_t, u32);
88432-asmlinkage long compat_sys_shmat(int shmid, compat_uptr_t shmaddr, int shmflg);
88433+asmlinkage long compat_sys_shmat(int shmid, compat_uptr_t shmaddr, int shmflg) __intentional_overflow(0);
88434 asmlinkage long compat_sys_semctl(int semid, int semnum, int cmd, int arg);
88435 asmlinkage long compat_sys_msgsnd(int msqid, compat_uptr_t msgp,
88436 compat_ssize_t msgsz, int msgflg);
88437@@ -325,7 +325,7 @@ asmlinkage long compat_sys_msgrcv(int msqid, compat_uptr_t msgp,
88438 long compat_sys_msgctl(int first, int second, void __user *uptr);
88439 long compat_sys_shmctl(int first, int second, void __user *uptr);
88440 long compat_sys_semtimedop(int semid, struct sembuf __user *tsems,
88441- unsigned nsems, const struct compat_timespec __user *timeout);
88442+ compat_long_t nsems, const struct compat_timespec __user *timeout);
88443 asmlinkage long compat_sys_keyctl(u32 option,
88444 u32 arg2, u32 arg3, u32 arg4, u32 arg5);
88445 asmlinkage long compat_sys_ustat(unsigned dev, struct compat_ustat __user *u32);
88446@@ -439,7 +439,7 @@ extern int compat_ptrace_request(struct task_struct *child,
88447 extern long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
88448 compat_ulong_t addr, compat_ulong_t data);
88449 asmlinkage long compat_sys_ptrace(compat_long_t request, compat_long_t pid,
88450- compat_long_t addr, compat_long_t data);
88451+ compat_ulong_t addr, compat_ulong_t data);
88452
88453 asmlinkage long compat_sys_lookup_dcookie(u32, u32, char __user *, compat_size_t);
88454 /*
88455diff --git a/include/linux/compiler-gcc4.h b/include/linux/compiler-gcc4.h
88456index 769e198..f670585 100644
88457--- a/include/linux/compiler-gcc4.h
88458+++ b/include/linux/compiler-gcc4.h
88459@@ -39,9 +39,34 @@
88460 # define __compiletime_warning(message) __attribute__((warning(message)))
88461 # define __compiletime_error(message) __attribute__((error(message)))
88462 #endif /* __CHECKER__ */
88463+
88464+#define __alloc_size(...) __attribute((alloc_size(__VA_ARGS__)))
88465+#define __bos(ptr, arg) __builtin_object_size((ptr), (arg))
88466+#define __bos0(ptr) __bos((ptr), 0)
88467+#define __bos1(ptr) __bos((ptr), 1)
88468 #endif /* GCC_VERSION >= 40300 */
88469
88470 #if GCC_VERSION >= 40500
88471+
88472+#ifdef RANDSTRUCT_PLUGIN
88473+#define __randomize_layout __attribute__((randomize_layout))
88474+#define __no_randomize_layout __attribute__((no_randomize_layout))
88475+#endif
88476+
88477+#ifdef CONSTIFY_PLUGIN
88478+#define __no_const __attribute__((no_const))
88479+#define __do_const __attribute__((do_const))
88480+#endif
88481+
88482+#ifdef SIZE_OVERFLOW_PLUGIN
88483+#define __size_overflow(...) __attribute__((size_overflow(__VA_ARGS__)))
88484+#define __intentional_overflow(...) __attribute__((intentional_overflow(__VA_ARGS__)))
88485+#endif
88486+
88487+#ifdef LATENT_ENTROPY_PLUGIN
88488+#define __latent_entropy __attribute__((latent_entropy))
88489+#endif
88490+
88491 /*
88492 * Mark a position in code as unreachable. This can be used to
88493 * suppress control flow warnings after asm blocks that transfer
88494diff --git a/include/linux/compiler-gcc5.h b/include/linux/compiler-gcc5.h
88495index efee493..06f9f63 100644
88496--- a/include/linux/compiler-gcc5.h
88497+++ b/include/linux/compiler-gcc5.h
88498@@ -28,6 +28,30 @@
88499 # define __compiletime_error(message) __attribute__((error(message)))
88500 #endif /* __CHECKER__ */
88501
88502+#define __alloc_size(...) __attribute((alloc_size(__VA_ARGS__)))
88503+#define __bos(ptr, arg) __builtin_object_size((ptr), (arg))
88504+#define __bos0(ptr) __bos((ptr), 0)
88505+#define __bos1(ptr) __bos((ptr), 1)
88506+
88507+#ifdef RANDSTRUCT_PLUGIN
88508+#define __randomize_layout __attribute__((randomize_layout))
88509+#define __no_randomize_layout __attribute__((no_randomize_layout))
88510+#endif
88511+
88512+#ifdef CONSTIFY_PLUGIN
88513+#define __no_const __attribute__((no_const))
88514+#define __do_const __attribute__((do_const))
88515+#endif
88516+
88517+#ifdef SIZE_OVERFLOW_PLUGIN
88518+#define __size_overflow(...) __attribute__((size_overflow(__VA_ARGS__)))
88519+#define __intentional_overflow(...) __attribute__((intentional_overflow(__VA_ARGS__)))
88520+#endif
88521+
88522+#ifdef LATENT_ENTROPY_PLUGIN
88523+#define __latent_entropy __attribute__((latent_entropy))
88524+#endif
88525+
88526 /*
88527 * Mark a position in code as unreachable. This can be used to
88528 * suppress control flow warnings after asm blocks that transfer
88529diff --git a/include/linux/compiler.h b/include/linux/compiler.h
88530index 1b45e4a..33028cd 100644
88531--- a/include/linux/compiler.h
88532+++ b/include/linux/compiler.h
88533@@ -5,11 +5,14 @@
88534
88535 #ifdef __CHECKER__
88536 # define __user __attribute__((noderef, address_space(1)))
88537+# define __force_user __force __user
88538 # define __kernel __attribute__((address_space(0)))
88539+# define __force_kernel __force __kernel
88540 # define __safe __attribute__((safe))
88541 # define __force __attribute__((force))
88542 # define __nocast __attribute__((nocast))
88543 # define __iomem __attribute__((noderef, address_space(2)))
88544+# define __force_iomem __force __iomem
88545 # define __must_hold(x) __attribute__((context(x,1,1)))
88546 # define __acquires(x) __attribute__((context(x,0,1)))
88547 # define __releases(x) __attribute__((context(x,1,0)))
88548@@ -17,20 +20,37 @@
88549 # define __release(x) __context__(x,-1)
88550 # define __cond_lock(x,c) ((c) ? ({ __acquire(x); 1; }) : 0)
88551 # define __percpu __attribute__((noderef, address_space(3)))
88552+# define __force_percpu __force __percpu
88553 #ifdef CONFIG_SPARSE_RCU_POINTER
88554 # define __rcu __attribute__((noderef, address_space(4)))
88555+# define __force_rcu __force __rcu
88556 #else
88557 # define __rcu
88558+# define __force_rcu
88559 #endif
88560 extern void __chk_user_ptr(const volatile void __user *);
88561 extern void __chk_io_ptr(const volatile void __iomem *);
88562 #else
88563-# define __user
88564-# define __kernel
88565+# ifdef CHECKER_PLUGIN
88566+//# define __user
88567+//# define __force_user
88568+//# define __kernel
88569+//# define __force_kernel
88570+# else
88571+# ifdef STRUCTLEAK_PLUGIN
88572+# define __user __attribute__((user))
88573+# else
88574+# define __user
88575+# endif
88576+# define __force_user
88577+# define __kernel
88578+# define __force_kernel
88579+# endif
88580 # define __safe
88581 # define __force
88582 # define __nocast
88583 # define __iomem
88584+# define __force_iomem
88585 # define __chk_user_ptr(x) (void)0
88586 # define __chk_io_ptr(x) (void)0
88587 # define __builtin_warning(x, y...) (1)
88588@@ -41,7 +61,9 @@ extern void __chk_io_ptr(const volatile void __iomem *);
88589 # define __release(x) (void)0
88590 # define __cond_lock(x,c) (c)
88591 # define __percpu
88592+# define __force_percpu
88593 # define __rcu
88594+# define __force_rcu
88595 #endif
88596
88597 /* Indirect macros required for expanded argument pasting, eg. __LINE__. */
88598@@ -205,32 +227,32 @@ static __always_inline void data_access_exceeds_word_size(void)
88599 static __always_inline void __read_once_size(const volatile void *p, void *res, int size)
88600 {
88601 switch (size) {
88602- case 1: *(__u8 *)res = *(volatile __u8 *)p; break;
88603- case 2: *(__u16 *)res = *(volatile __u16 *)p; break;
88604- case 4: *(__u32 *)res = *(volatile __u32 *)p; break;
88605+ case 1: *(__u8 *)res = *(const volatile __u8 *)p; break;
88606+ case 2: *(__u16 *)res = *(const volatile __u16 *)p; break;
88607+ case 4: *(__u32 *)res = *(const volatile __u32 *)p; break;
88608 #ifdef CONFIG_64BIT
88609- case 8: *(__u64 *)res = *(volatile __u64 *)p; break;
88610+ case 8: *(__u64 *)res = *(const volatile __u64 *)p; break;
88611 #endif
88612 default:
88613 barrier();
88614- __builtin_memcpy((void *)res, (const void *)p, size);
88615+ __builtin_memcpy(res, (const void *)p, size);
88616 data_access_exceeds_word_size();
88617 barrier();
88618 }
88619 }
88620
88621-static __always_inline void __write_once_size(volatile void *p, void *res, int size)
88622+static __always_inline void __write_once_size(volatile void *p, const void *res, int size)
88623 {
88624 switch (size) {
88625- case 1: *(volatile __u8 *)p = *(__u8 *)res; break;
88626- case 2: *(volatile __u16 *)p = *(__u16 *)res; break;
88627- case 4: *(volatile __u32 *)p = *(__u32 *)res; break;
88628+ case 1: *(volatile __u8 *)p = *(const __u8 *)res; break;
88629+ case 2: *(volatile __u16 *)p = *(const __u16 *)res; break;
88630+ case 4: *(volatile __u32 *)p = *(const __u32 *)res; break;
88631 #ifdef CONFIG_64BIT
88632- case 8: *(volatile __u64 *)p = *(__u64 *)res; break;
88633+ case 8: *(volatile __u64 *)p = *(const __u64 *)res; break;
88634 #endif
88635 default:
88636 barrier();
88637- __builtin_memcpy((void *)p, (const void *)res, size);
88638+ __builtin_memcpy((void *)p, res, size);
88639 data_access_exceeds_word_size();
88640 barrier();
88641 }
88642@@ -364,6 +386,34 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s
88643 # define __attribute_const__ /* unimplemented */
88644 #endif
88645
88646+#ifndef __randomize_layout
88647+# define __randomize_layout
88648+#endif
88649+
88650+#ifndef __no_randomize_layout
88651+# define __no_randomize_layout
88652+#endif
88653+
88654+#ifndef __no_const
88655+# define __no_const
88656+#endif
88657+
88658+#ifndef __do_const
88659+# define __do_const
88660+#endif
88661+
88662+#ifndef __size_overflow
88663+# define __size_overflow(...)
88664+#endif
88665+
88666+#ifndef __intentional_overflow
88667+# define __intentional_overflow(...)
88668+#endif
88669+
88670+#ifndef __latent_entropy
88671+# define __latent_entropy
88672+#endif
88673+
88674 /*
88675 * Tell gcc if a function is cold. The compiler will assume any path
88676 * directly leading to the call is unlikely.
88677@@ -373,6 +423,22 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s
88678 #define __cold
88679 #endif
88680
88681+#ifndef __alloc_size
88682+#define __alloc_size(...)
88683+#endif
88684+
88685+#ifndef __bos
88686+#define __bos(ptr, arg)
88687+#endif
88688+
88689+#ifndef __bos0
88690+#define __bos0(ptr)
88691+#endif
88692+
88693+#ifndef __bos1
88694+#define __bos1(ptr)
88695+#endif
88696+
88697 /* Simple shorthand for a section definition */
88698 #ifndef __section
88699 # define __section(S) __attribute__ ((__section__(#S)))
88700@@ -387,6 +453,8 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s
88701 # define __same_type(a, b) __builtin_types_compatible_p(typeof(a), typeof(b))
88702 #endif
88703
88704+#define __type_is_unsigned(t) (__same_type((t)0, 0UL) || __same_type((t)0, 0U) || __same_type((t)0, (unsigned short)0) || __same_type((t)0, (unsigned char)0))
88705+
88706 /* Is this type a native word size -- useful for atomic operations */
88707 #ifndef __native_word
88708 # define __native_word(t) (sizeof(t) == sizeof(char) || sizeof(t) == sizeof(short) || sizeof(t) == sizeof(int) || sizeof(t) == sizeof(long))
88709@@ -466,8 +534,9 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s
88710 */
88711 #define __ACCESS_ONCE(x) ({ \
88712 __maybe_unused typeof(x) __var = (__force typeof(x)) 0; \
88713- (volatile typeof(x) *)&(x); })
88714+ (volatile const typeof(x) *)&(x); })
88715 #define ACCESS_ONCE(x) (*__ACCESS_ONCE(x))
88716+#define ACCESS_ONCE_RW(x) (*(volatile typeof(x) *)&(x))
88717
88718 /* Ignore/forbid kprobes attach on very low level functions marked by this attribute: */
88719 #ifdef CONFIG_KPROBES
88720diff --git a/include/linux/completion.h b/include/linux/completion.h
88721index 5d5aaae..0ea9b84 100644
88722--- a/include/linux/completion.h
88723+++ b/include/linux/completion.h
88724@@ -90,16 +90,16 @@ static inline void reinit_completion(struct completion *x)
88725
88726 extern void wait_for_completion(struct completion *);
88727 extern void wait_for_completion_io(struct completion *);
88728-extern int wait_for_completion_interruptible(struct completion *x);
88729-extern int wait_for_completion_killable(struct completion *x);
88730+extern int wait_for_completion_interruptible(struct completion *x) __intentional_overflow(-1);
88731+extern int wait_for_completion_killable(struct completion *x) __intentional_overflow(-1);
88732 extern unsigned long wait_for_completion_timeout(struct completion *x,
88733- unsigned long timeout);
88734+ unsigned long timeout) __intentional_overflow(-1);
88735 extern unsigned long wait_for_completion_io_timeout(struct completion *x,
88736- unsigned long timeout);
88737+ unsigned long timeout) __intentional_overflow(-1);
88738 extern long wait_for_completion_interruptible_timeout(
88739- struct completion *x, unsigned long timeout);
88740+ struct completion *x, unsigned long timeout) __intentional_overflow(-1);
88741 extern long wait_for_completion_killable_timeout(
88742- struct completion *x, unsigned long timeout);
88743+ struct completion *x, unsigned long timeout) __intentional_overflow(-1);
88744 extern bool try_wait_for_completion(struct completion *x);
88745 extern bool completion_done(struct completion *x);
88746
88747diff --git a/include/linux/configfs.h b/include/linux/configfs.h
88748index 34025df..d94bbbc 100644
88749--- a/include/linux/configfs.h
88750+++ b/include/linux/configfs.h
88751@@ -125,7 +125,7 @@ struct configfs_attribute {
88752 const char *ca_name;
88753 struct module *ca_owner;
88754 umode_t ca_mode;
88755-};
88756+} __do_const;
88757
88758 /*
88759 * Users often need to create attribute structures for their configurable
88760diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h
88761index 2ee4888..0451f5e 100644
88762--- a/include/linux/cpufreq.h
88763+++ b/include/linux/cpufreq.h
88764@@ -207,6 +207,7 @@ struct global_attr {
88765 ssize_t (*store)(struct kobject *a, struct attribute *b,
88766 const char *c, size_t count);
88767 };
88768+typedef struct global_attr __no_const global_attr_no_const;
88769
88770 #define define_one_global_ro(_name) \
88771 static struct global_attr _name = \
88772@@ -278,7 +279,7 @@ struct cpufreq_driver {
88773 bool boost_supported;
88774 bool boost_enabled;
88775 int (*set_boost)(int state);
88776-};
88777+} __do_const;
88778
88779 /* flags */
88780 #define CPUFREQ_STICKY (1 << 0) /* driver isn't removed even if
88781diff --git a/include/linux/cpuidle.h b/include/linux/cpuidle.h
88782index 9c5e892..feb34e0 100644
88783--- a/include/linux/cpuidle.h
88784+++ b/include/linux/cpuidle.h
88785@@ -59,7 +59,8 @@ struct cpuidle_state {
88786 void (*enter_freeze) (struct cpuidle_device *dev,
88787 struct cpuidle_driver *drv,
88788 int index);
88789-};
88790+} __do_const;
88791+typedef struct cpuidle_state __no_const cpuidle_state_no_const;
88792
88793 /* Idle State Flags */
88794 #define CPUIDLE_FLAG_COUPLED (0x02) /* state applies to multiple cpus */
88795@@ -227,7 +228,7 @@ struct cpuidle_governor {
88796 void (*reflect) (struct cpuidle_device *dev, int index);
88797
88798 struct module *owner;
88799-};
88800+} __do_const;
88801
88802 #ifdef CONFIG_CPU_IDLE
88803 extern int cpuidle_register_governor(struct cpuidle_governor *gov);
88804diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h
88805index 086549a..a572d94 100644
88806--- a/include/linux/cpumask.h
88807+++ b/include/linux/cpumask.h
88808@@ -126,17 +126,17 @@ static inline unsigned int cpumask_first(const struct cpumask *srcp)
88809 }
88810
88811 /* Valid inputs for n are -1 and 0. */
88812-static inline unsigned int cpumask_next(int n, const struct cpumask *srcp)
88813+static inline unsigned int __intentional_overflow(-1) cpumask_next(int n, const struct cpumask *srcp)
88814 {
88815 return n+1;
88816 }
88817
88818-static inline unsigned int cpumask_next_zero(int n, const struct cpumask *srcp)
88819+static inline unsigned int __intentional_overflow(-1) cpumask_next_zero(int n, const struct cpumask *srcp)
88820 {
88821 return n+1;
88822 }
88823
88824-static inline unsigned int cpumask_next_and(int n,
88825+static inline unsigned int __intentional_overflow(-1) cpumask_next_and(int n,
88826 const struct cpumask *srcp,
88827 const struct cpumask *andp)
88828 {
88829@@ -182,7 +182,7 @@ static inline unsigned int cpumask_first(const struct cpumask *srcp)
88830 *
88831 * Returns >= nr_cpu_ids if no further cpus set.
88832 */
88833-static inline unsigned int cpumask_next(int n, const struct cpumask *srcp)
88834+static inline unsigned int __intentional_overflow(-1) cpumask_next(int n, const struct cpumask *srcp)
88835 {
88836 /* -1 is a legal arg here. */
88837 if (n != -1)
88838@@ -197,7 +197,7 @@ static inline unsigned int cpumask_next(int n, const struct cpumask *srcp)
88839 *
88840 * Returns >= nr_cpu_ids if no further cpus unset.
88841 */
88842-static inline unsigned int cpumask_next_zero(int n, const struct cpumask *srcp)
88843+static inline unsigned int __intentional_overflow(-1) cpumask_next_zero(int n, const struct cpumask *srcp)
88844 {
88845 /* -1 is a legal arg here. */
88846 if (n != -1)
88847@@ -205,7 +205,7 @@ static inline unsigned int cpumask_next_zero(int n, const struct cpumask *srcp)
88848 return find_next_zero_bit(cpumask_bits(srcp), nr_cpumask_bits, n+1);
88849 }
88850
88851-int cpumask_next_and(int n, const struct cpumask *, const struct cpumask *);
88852+int cpumask_next_and(int n, const struct cpumask *, const struct cpumask *) __intentional_overflow(-1);
88853 int cpumask_any_but(const struct cpumask *mask, unsigned int cpu);
88854 int cpumask_set_cpu_local_first(int i, int numa_node, cpumask_t *dstp);
88855
88856@@ -472,7 +472,7 @@ static inline bool cpumask_full(const struct cpumask *srcp)
88857 * cpumask_weight - Count of bits in *srcp
88858 * @srcp: the cpumask to count bits (< nr_cpu_ids) in.
88859 */
88860-static inline unsigned int cpumask_weight(const struct cpumask *srcp)
88861+static inline unsigned int __intentional_overflow(-1) cpumask_weight(const struct cpumask *srcp)
88862 {
88863 return bitmap_weight(cpumask_bits(srcp), nr_cpumask_bits);
88864 }
88865diff --git a/include/linux/cred.h b/include/linux/cred.h
88866index 2fb2ca2..d6a3340 100644
88867--- a/include/linux/cred.h
88868+++ b/include/linux/cred.h
88869@@ -35,7 +35,7 @@ struct group_info {
88870 int nblocks;
88871 kgid_t small_block[NGROUPS_SMALL];
88872 kgid_t *blocks[0];
88873-};
88874+} __randomize_layout;
88875
88876 /**
88877 * get_group_info - Get a reference to a group info structure
88878@@ -137,7 +137,7 @@ struct cred {
88879 struct user_namespace *user_ns; /* user_ns the caps and keyrings are relative to. */
88880 struct group_info *group_info; /* supplementary groups for euid/fsgid */
88881 struct rcu_head rcu; /* RCU deletion hook */
88882-};
88883+} __randomize_layout;
88884
88885 extern void __put_cred(struct cred *);
88886 extern void exit_creds(struct task_struct *);
88887@@ -195,6 +195,9 @@ static inline void validate_creds_for_do_exit(struct task_struct *tsk)
88888 static inline void validate_process_creds(void)
88889 {
88890 }
88891+static inline void validate_task_creds(struct task_struct *task)
88892+{
88893+}
88894 #endif
88895
88896 /**
88897@@ -332,6 +335,7 @@ static inline void put_cred(const struct cred *_cred)
88898
88899 #define task_uid(task) (task_cred_xxx((task), uid))
88900 #define task_euid(task) (task_cred_xxx((task), euid))
88901+#define task_securebits(task) (task_cred_xxx((task), securebits))
88902
88903 #define current_cred_xxx(xxx) \
88904 ({ \
88905diff --git a/include/linux/crypto.h b/include/linux/crypto.h
88906index fb5ef16..05d1e59 100644
88907--- a/include/linux/crypto.h
88908+++ b/include/linux/crypto.h
88909@@ -626,7 +626,7 @@ struct cipher_tfm {
88910 const u8 *key, unsigned int keylen);
88911 void (*cit_encrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
88912 void (*cit_decrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
88913-};
88914+} __no_const;
88915
88916 struct hash_tfm {
88917 int (*init)(struct hash_desc *desc);
88918@@ -647,13 +647,13 @@ struct compress_tfm {
88919 int (*cot_decompress)(struct crypto_tfm *tfm,
88920 const u8 *src, unsigned int slen,
88921 u8 *dst, unsigned int *dlen);
88922-};
88923+} __no_const;
88924
88925 struct rng_tfm {
88926 int (*rng_gen_random)(struct crypto_rng *tfm, u8 *rdata,
88927 unsigned int dlen);
88928 int (*rng_reset)(struct crypto_rng *tfm, u8 *seed, unsigned int slen);
88929-};
88930+} __no_const;
88931
88932 #define crt_ablkcipher crt_u.ablkcipher
88933 #define crt_aead crt_u.aead
88934diff --git a/include/linux/ctype.h b/include/linux/ctype.h
88935index 653589e..4ef254a 100644
88936--- a/include/linux/ctype.h
88937+++ b/include/linux/ctype.h
88938@@ -56,7 +56,7 @@ static inline unsigned char __toupper(unsigned char c)
88939 * Fast implementation of tolower() for internal usage. Do not use in your
88940 * code.
88941 */
88942-static inline char _tolower(const char c)
88943+static inline unsigned char _tolower(const unsigned char c)
88944 {
88945 return c | 0x20;
88946 }
88947diff --git a/include/linux/dcache.h b/include/linux/dcache.h
88948index d835879..c8e5b92 100644
88949--- a/include/linux/dcache.h
88950+++ b/include/linux/dcache.h
88951@@ -123,6 +123,9 @@ struct dentry {
88952 unsigned long d_time; /* used by d_revalidate */
88953 void *d_fsdata; /* fs-specific data */
88954
88955+#ifdef CONFIG_GRKERNSEC_CHROOT_RENAME
88956+ atomic_t chroot_refcnt; /* tracks use of directory in chroot */
88957+#endif
88958 struct list_head d_lru; /* LRU list */
88959 struct list_head d_child; /* child of parent list */
88960 struct list_head d_subdirs; /* our children */
88961@@ -133,7 +136,7 @@ struct dentry {
88962 struct hlist_node d_alias; /* inode alias list */
88963 struct rcu_head d_rcu;
88964 } d_u;
88965-};
88966+} __randomize_layout;
88967
88968 /*
88969 * dentry->d_lock spinlock nesting subclasses:
88970@@ -319,7 +322,7 @@ extern struct dentry *__d_lookup_rcu(const struct dentry *parent,
88971
88972 static inline unsigned d_count(const struct dentry *dentry)
88973 {
88974- return dentry->d_lockref.count;
88975+ return __lockref_read(&dentry->d_lockref);
88976 }
88977
88978 /*
88979@@ -347,7 +350,7 @@ extern char *dentry_path(struct dentry *, char *, int);
88980 static inline struct dentry *dget_dlock(struct dentry *dentry)
88981 {
88982 if (dentry)
88983- dentry->d_lockref.count++;
88984+ __lockref_inc(&dentry->d_lockref);
88985 return dentry;
88986 }
88987
88988diff --git a/include/linux/decompress/mm.h b/include/linux/decompress/mm.h
88989index 7925bf0..d5143d2 100644
88990--- a/include/linux/decompress/mm.h
88991+++ b/include/linux/decompress/mm.h
88992@@ -77,7 +77,7 @@ static void free(void *where)
88993 * warnings when not needed (indeed large_malloc / large_free are not
88994 * needed by inflate */
88995
88996-#define malloc(a) kmalloc(a, GFP_KERNEL)
88997+#define malloc(a) kmalloc((a), GFP_KERNEL)
88998 #define free(a) kfree(a)
88999
89000 #define large_malloc(a) vmalloc(a)
89001diff --git a/include/linux/devfreq.h b/include/linux/devfreq.h
89002index ce447f0..83c66bd 100644
89003--- a/include/linux/devfreq.h
89004+++ b/include/linux/devfreq.h
89005@@ -114,7 +114,7 @@ struct devfreq_governor {
89006 int (*get_target_freq)(struct devfreq *this, unsigned long *freq);
89007 int (*event_handler)(struct devfreq *devfreq,
89008 unsigned int event, void *data);
89009-};
89010+} __do_const;
89011
89012 /**
89013 * struct devfreq - Device devfreq structure
89014diff --git a/include/linux/device.h b/include/linux/device.h
89015index 0eb8ee2..c603b6a 100644
89016--- a/include/linux/device.h
89017+++ b/include/linux/device.h
89018@@ -311,7 +311,7 @@ struct subsys_interface {
89019 struct list_head node;
89020 int (*add_dev)(struct device *dev, struct subsys_interface *sif);
89021 int (*remove_dev)(struct device *dev, struct subsys_interface *sif);
89022-};
89023+} __do_const;
89024
89025 int subsys_interface_register(struct subsys_interface *sif);
89026 void subsys_interface_unregister(struct subsys_interface *sif);
89027@@ -507,7 +507,7 @@ struct device_type {
89028 void (*release)(struct device *dev);
89029
89030 const struct dev_pm_ops *pm;
89031-};
89032+} __do_const;
89033
89034 /* interface for exporting device attributes */
89035 struct device_attribute {
89036@@ -517,11 +517,12 @@ struct device_attribute {
89037 ssize_t (*store)(struct device *dev, struct device_attribute *attr,
89038 const char *buf, size_t count);
89039 };
89040+typedef struct device_attribute __no_const device_attribute_no_const;
89041
89042 struct dev_ext_attribute {
89043 struct device_attribute attr;
89044 void *var;
89045-};
89046+} __do_const;
89047
89048 ssize_t device_show_ulong(struct device *dev, struct device_attribute *attr,
89049 char *buf);
89050diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
89051index c3007cb..43efc8c 100644
89052--- a/include/linux/dma-mapping.h
89053+++ b/include/linux/dma-mapping.h
89054@@ -60,7 +60,7 @@ struct dma_map_ops {
89055 u64 (*get_required_mask)(struct device *dev);
89056 #endif
89057 int is_phys;
89058-};
89059+} __do_const;
89060
89061 #define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))
89062
89063diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
89064index b6997a0..108be6c 100644
89065--- a/include/linux/dmaengine.h
89066+++ b/include/linux/dmaengine.h
89067@@ -1133,9 +1133,9 @@ struct dma_pinned_list {
89068 struct dma_pinned_list *dma_pin_iovec_pages(struct iovec *iov, size_t len);
89069 void dma_unpin_iovec_pages(struct dma_pinned_list* pinned_list);
89070
89071-dma_cookie_t dma_memcpy_to_iovec(struct dma_chan *chan, struct iovec *iov,
89072+dma_cookie_t __intentional_overflow(0) dma_memcpy_to_iovec(struct dma_chan *chan, struct iovec *iov,
89073 struct dma_pinned_list *pinned_list, unsigned char *kdata, size_t len);
89074-dma_cookie_t dma_memcpy_pg_to_iovec(struct dma_chan *chan, struct iovec *iov,
89075+dma_cookie_t __intentional_overflow(0) dma_memcpy_pg_to_iovec(struct dma_chan *chan, struct iovec *iov,
89076 struct dma_pinned_list *pinned_list, struct page *page,
89077 unsigned int offset, size_t len);
89078
89079diff --git a/include/linux/efi.h b/include/linux/efi.h
89080index cf7e431..d239dce 100644
89081--- a/include/linux/efi.h
89082+++ b/include/linux/efi.h
89083@@ -1056,6 +1056,7 @@ struct efivar_operations {
89084 efi_set_variable_nonblocking_t *set_variable_nonblocking;
89085 efi_query_variable_store_t *query_variable_store;
89086 };
89087+typedef struct efivar_operations __no_const efivar_operations_no_const;
89088
89089 struct efivars {
89090 /*
89091diff --git a/include/linux/elf.h b/include/linux/elf.h
89092index 20fa8d8..3d0dd18 100644
89093--- a/include/linux/elf.h
89094+++ b/include/linux/elf.h
89095@@ -29,6 +29,7 @@ extern Elf32_Dyn _DYNAMIC [];
89096 #define elf_note elf32_note
89097 #define elf_addr_t Elf32_Off
89098 #define Elf_Half Elf32_Half
89099+#define elf_dyn Elf32_Dyn
89100
89101 #else
89102
89103@@ -39,6 +40,7 @@ extern Elf64_Dyn _DYNAMIC [];
89104 #define elf_note elf64_note
89105 #define elf_addr_t Elf64_Off
89106 #define Elf_Half Elf64_Half
89107+#define elf_dyn Elf64_Dyn
89108
89109 #endif
89110
89111diff --git a/include/linux/err.h b/include/linux/err.h
89112index a729120..6ede2c9 100644
89113--- a/include/linux/err.h
89114+++ b/include/linux/err.h
89115@@ -20,12 +20,12 @@
89116
89117 #define IS_ERR_VALUE(x) unlikely((x) >= (unsigned long)-MAX_ERRNO)
89118
89119-static inline void * __must_check ERR_PTR(long error)
89120+static inline void * __must_check __intentional_overflow(-1) ERR_PTR(long error)
89121 {
89122 return (void *) error;
89123 }
89124
89125-static inline long __must_check PTR_ERR(__force const void *ptr)
89126+static inline long __must_check __intentional_overflow(-1) PTR_ERR(__force const void *ptr)
89127 {
89128 return (long) ptr;
89129 }
89130diff --git a/include/linux/extcon.h b/include/linux/extcon.h
89131index 36f49c4..a2a1f4c 100644
89132--- a/include/linux/extcon.h
89133+++ b/include/linux/extcon.h
89134@@ -135,7 +135,7 @@ struct extcon_dev {
89135 /* /sys/class/extcon/.../mutually_exclusive/... */
89136 struct attribute_group attr_g_muex;
89137 struct attribute **attrs_muex;
89138- struct device_attribute *d_attrs_muex;
89139+ device_attribute_no_const *d_attrs_muex;
89140 };
89141
89142 /**
89143diff --git a/include/linux/fb.h b/include/linux/fb.h
89144index 043f328..180ccbf 100644
89145--- a/include/linux/fb.h
89146+++ b/include/linux/fb.h
89147@@ -305,7 +305,8 @@ struct fb_ops {
89148 /* called at KDB enter and leave time to prepare the console */
89149 int (*fb_debug_enter)(struct fb_info *info);
89150 int (*fb_debug_leave)(struct fb_info *info);
89151-};
89152+} __do_const;
89153+typedef struct fb_ops __no_const fb_ops_no_const;
89154
89155 #ifdef CONFIG_FB_TILEBLITTING
89156 #define FB_TILE_CURSOR_NONE 0
89157diff --git a/include/linux/fdtable.h b/include/linux/fdtable.h
89158index 230f87b..1fd0485 100644
89159--- a/include/linux/fdtable.h
89160+++ b/include/linux/fdtable.h
89161@@ -100,7 +100,7 @@ struct files_struct *get_files_struct(struct task_struct *);
89162 void put_files_struct(struct files_struct *fs);
89163 void reset_files_struct(struct files_struct *);
89164 int unshare_files(struct files_struct **);
89165-struct files_struct *dup_fd(struct files_struct *, int *);
89166+struct files_struct *dup_fd(struct files_struct *, int *) __latent_entropy;
89167 void do_close_on_exec(struct files_struct *);
89168 int iterate_fd(struct files_struct *, unsigned,
89169 int (*)(const void *, struct file *, unsigned),
89170diff --git a/include/linux/frontswap.h b/include/linux/frontswap.h
89171index 8293262..2b3b8bd 100644
89172--- a/include/linux/frontswap.h
89173+++ b/include/linux/frontswap.h
89174@@ -11,7 +11,7 @@ struct frontswap_ops {
89175 int (*load)(unsigned, pgoff_t, struct page *);
89176 void (*invalidate_page)(unsigned, pgoff_t);
89177 void (*invalidate_area)(unsigned);
89178-};
89179+} __no_const;
89180
89181 extern bool frontswap_enabled;
89182 extern struct frontswap_ops *
89183diff --git a/include/linux/fs.h b/include/linux/fs.h
89184index 52cc449..58b25c9 100644
89185--- a/include/linux/fs.h
89186+++ b/include/linux/fs.h
89187@@ -410,7 +410,7 @@ struct address_space {
89188 spinlock_t private_lock; /* for use by the address_space */
89189 struct list_head private_list; /* ditto */
89190 void *private_data; /* ditto */
89191-} __attribute__((aligned(sizeof(long))));
89192+} __attribute__((aligned(sizeof(long)))) __randomize_layout;
89193 /*
89194 * On most architectures that alignment is already the case; but
89195 * must be enforced here for CRIS, to let the least significant bit
89196@@ -453,7 +453,7 @@ struct block_device {
89197 int bd_fsfreeze_count;
89198 /* Mutex for freeze */
89199 struct mutex bd_fsfreeze_mutex;
89200-};
89201+} __randomize_layout;
89202
89203 /*
89204 * Radix-tree tags, for tagging dirty and writeback pages within the pagecache
89205@@ -639,7 +639,7 @@ struct inode {
89206 #endif
89207
89208 void *i_private; /* fs or device private pointer */
89209-};
89210+} __randomize_layout;
89211
89212 static inline int inode_unhashed(struct inode *inode)
89213 {
89214@@ -834,7 +834,7 @@ struct file {
89215 struct list_head f_tfile_llink;
89216 #endif /* #ifdef CONFIG_EPOLL */
89217 struct address_space *f_mapping;
89218-} __attribute__((aligned(4))); /* lest something weird decides that 2 is OK */
89219+} __attribute__((aligned(4))) __randomize_layout; /* lest something weird decides that 2 is OK */
89220
89221 struct file_handle {
89222 __u32 handle_bytes;
89223@@ -962,7 +962,7 @@ struct file_lock {
89224 int state; /* state of grant or error if -ve */
89225 } afs;
89226 } fl_u;
89227-};
89228+} __randomize_layout;
89229
89230 struct file_lock_context {
89231 spinlock_t flc_lock;
89232@@ -1316,7 +1316,7 @@ struct super_block {
89233 * Indicates how deep in a filesystem stack this SB is
89234 */
89235 int s_stack_depth;
89236-};
89237+} __randomize_layout;
89238
89239 extern struct timespec current_fs_time(struct super_block *sb);
89240
89241@@ -1570,7 +1570,8 @@ struct file_operations {
89242 #ifndef CONFIG_MMU
89243 unsigned (*mmap_capabilities)(struct file *);
89244 #endif
89245-};
89246+} __do_const __randomize_layout;
89247+typedef struct file_operations __no_const file_operations_no_const;
89248
89249 struct inode_operations {
89250 struct dentry * (*lookup) (struct inode *,struct dentry *, unsigned int);
89251@@ -2918,4 +2919,14 @@ static inline bool dir_relax(struct inode *inode)
89252 return !IS_DEADDIR(inode);
89253 }
89254
89255+static inline bool is_sidechannel_device(const struct inode *inode)
89256+{
89257+#ifdef CONFIG_GRKERNSEC_DEVICE_SIDECHANNEL
89258+ umode_t mode = inode->i_mode;
89259+ return ((S_ISCHR(mode) || S_ISBLK(mode)) && (mode & (S_IROTH | S_IWOTH)));
89260+#else
89261+ return false;
89262+#endif
89263+}
89264+
89265 #endif /* _LINUX_FS_H */
89266diff --git a/include/linux/fs_struct.h b/include/linux/fs_struct.h
89267index 0efc3e6..fd23610 100644
89268--- a/include/linux/fs_struct.h
89269+++ b/include/linux/fs_struct.h
89270@@ -6,13 +6,13 @@
89271 #include <linux/seqlock.h>
89272
89273 struct fs_struct {
89274- int users;
89275+ atomic_t users;
89276 spinlock_t lock;
89277 seqcount_t seq;
89278 int umask;
89279 int in_exec;
89280 struct path root, pwd;
89281-};
89282+} __randomize_layout;
89283
89284 extern struct kmem_cache *fs_cachep;
89285
89286diff --git a/include/linux/fscache-cache.h b/include/linux/fscache-cache.h
89287index 7714849..a4a5c7a 100644
89288--- a/include/linux/fscache-cache.h
89289+++ b/include/linux/fscache-cache.h
89290@@ -113,7 +113,7 @@ struct fscache_operation {
89291 fscache_operation_release_t release;
89292 };
89293
89294-extern atomic_t fscache_op_debug_id;
89295+extern atomic_unchecked_t fscache_op_debug_id;
89296 extern void fscache_op_work_func(struct work_struct *work);
89297
89298 extern void fscache_enqueue_operation(struct fscache_operation *);
89299@@ -135,7 +135,7 @@ static inline void fscache_operation_init(struct fscache_operation *op,
89300 INIT_WORK(&op->work, fscache_op_work_func);
89301 atomic_set(&op->usage, 1);
89302 op->state = FSCACHE_OP_ST_INITIALISED;
89303- op->debug_id = atomic_inc_return(&fscache_op_debug_id);
89304+ op->debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
89305 op->processor = processor;
89306 op->release = release;
89307 INIT_LIST_HEAD(&op->pend_link);
89308diff --git a/include/linux/fscache.h b/include/linux/fscache.h
89309index 115bb81..e7b812b 100644
89310--- a/include/linux/fscache.h
89311+++ b/include/linux/fscache.h
89312@@ -152,7 +152,7 @@ struct fscache_cookie_def {
89313 * - this is mandatory for any object that may have data
89314 */
89315 void (*now_uncached)(void *cookie_netfs_data);
89316-};
89317+} __do_const;
89318
89319 /*
89320 * fscache cached network filesystem type
89321diff --git a/include/linux/fsnotify.h b/include/linux/fsnotify.h
89322index 7ee1774..72505b8 100644
89323--- a/include/linux/fsnotify.h
89324+++ b/include/linux/fsnotify.h
89325@@ -197,6 +197,9 @@ static inline void fsnotify_access(struct file *file)
89326 struct inode *inode = file_inode(file);
89327 __u32 mask = FS_ACCESS;
89328
89329+ if (is_sidechannel_device(inode))
89330+ return;
89331+
89332 if (S_ISDIR(inode->i_mode))
89333 mask |= FS_ISDIR;
89334
89335@@ -215,6 +218,9 @@ static inline void fsnotify_modify(struct file *file)
89336 struct inode *inode = file_inode(file);
89337 __u32 mask = FS_MODIFY;
89338
89339+ if (is_sidechannel_device(inode))
89340+ return;
89341+
89342 if (S_ISDIR(inode->i_mode))
89343 mask |= FS_ISDIR;
89344
89345@@ -317,7 +323,7 @@ static inline void fsnotify_change(struct dentry *dentry, unsigned int ia_valid)
89346 */
89347 static inline const unsigned char *fsnotify_oldname_init(const unsigned char *name)
89348 {
89349- return kstrdup(name, GFP_KERNEL);
89350+ return (const unsigned char *)kstrdup((const char *)name, GFP_KERNEL);
89351 }
89352
89353 /*
89354diff --git a/include/linux/genhd.h b/include/linux/genhd.h
89355index ec274e0..e678159 100644
89356--- a/include/linux/genhd.h
89357+++ b/include/linux/genhd.h
89358@@ -194,7 +194,7 @@ struct gendisk {
89359 struct kobject *slave_dir;
89360
89361 struct timer_rand_state *random;
89362- atomic_t sync_io; /* RAID */
89363+ atomic_unchecked_t sync_io; /* RAID */
89364 struct disk_events *ev;
89365 #ifdef CONFIG_BLK_DEV_INTEGRITY
89366 struct blk_integrity *integrity;
89367@@ -435,7 +435,7 @@ extern void disk_flush_events(struct gendisk *disk, unsigned int mask);
89368 extern unsigned int disk_clear_events(struct gendisk *disk, unsigned int mask);
89369
89370 /* drivers/char/random.c */
89371-extern void add_disk_randomness(struct gendisk *disk);
89372+extern void add_disk_randomness(struct gendisk *disk) __latent_entropy;
89373 extern void rand_initialize_disk(struct gendisk *disk);
89374
89375 static inline sector_t get_start_sect(struct block_device *bdev)
89376diff --git a/include/linux/genl_magic_func.h b/include/linux/genl_magic_func.h
89377index 667c311..abac2a7 100644
89378--- a/include/linux/genl_magic_func.h
89379+++ b/include/linux/genl_magic_func.h
89380@@ -246,7 +246,7 @@ const char *CONCAT_(GENL_MAGIC_FAMILY, _genl_cmd_to_str)(__u8 cmd)
89381 },
89382
89383 #define ZZZ_genl_ops CONCAT_(GENL_MAGIC_FAMILY, _genl_ops)
89384-static struct genl_ops ZZZ_genl_ops[] __read_mostly = {
89385+static struct genl_ops ZZZ_genl_ops[] = {
89386 #include GENL_MAGIC_INCLUDE_FILE
89387 };
89388
89389diff --git a/include/linux/gfp.h b/include/linux/gfp.h
89390index 51bd1e7..0486343 100644
89391--- a/include/linux/gfp.h
89392+++ b/include/linux/gfp.h
89393@@ -34,6 +34,13 @@ struct vm_area_struct;
89394 #define ___GFP_NO_KSWAPD 0x400000u
89395 #define ___GFP_OTHER_NODE 0x800000u
89396 #define ___GFP_WRITE 0x1000000u
89397+
89398+#ifdef CONFIG_PAX_USERCOPY_SLABS
89399+#define ___GFP_USERCOPY 0x2000000u
89400+#else
89401+#define ___GFP_USERCOPY 0
89402+#endif
89403+
89404 /* If the above are modified, __GFP_BITS_SHIFT may need updating */
89405
89406 /*
89407@@ -90,6 +97,7 @@ struct vm_area_struct;
89408 #define __GFP_NO_KSWAPD ((__force gfp_t)___GFP_NO_KSWAPD)
89409 #define __GFP_OTHER_NODE ((__force gfp_t)___GFP_OTHER_NODE) /* On behalf of other node */
89410 #define __GFP_WRITE ((__force gfp_t)___GFP_WRITE) /* Allocator intends to dirty page */
89411+#define __GFP_USERCOPY ((__force gfp_t)___GFP_USERCOPY)/* Allocator intends to copy page to/from userland */
89412
89413 /*
89414 * This may seem redundant, but it's a way of annotating false positives vs.
89415@@ -97,7 +105,7 @@ struct vm_area_struct;
89416 */
89417 #define __GFP_NOTRACK_FALSE_POSITIVE (__GFP_NOTRACK)
89418
89419-#define __GFP_BITS_SHIFT 25 /* Room for N __GFP_FOO bits */
89420+#define __GFP_BITS_SHIFT 26 /* Room for N __GFP_FOO bits */
89421 #define __GFP_BITS_MASK ((__force gfp_t)((1 << __GFP_BITS_SHIFT) - 1))
89422
89423 /* This equals 0, but use constants in case they ever change */
89424@@ -152,6 +160,8 @@ struct vm_area_struct;
89425 /* 4GB DMA on some platforms */
89426 #define GFP_DMA32 __GFP_DMA32
89427
89428+#define GFP_USERCOPY __GFP_USERCOPY
89429+
89430 /* Convert GFP flags to their corresponding migrate type */
89431 static inline int gfpflags_to_migratetype(const gfp_t gfp_flags)
89432 {
89433diff --git a/include/linux/gracl.h b/include/linux/gracl.h
89434new file mode 100644
89435index 0000000..91858e4
89436--- /dev/null
89437+++ b/include/linux/gracl.h
89438@@ -0,0 +1,342 @@
89439+#ifndef GR_ACL_H
89440+#define GR_ACL_H
89441+
89442+#include <linux/grdefs.h>
89443+#include <linux/resource.h>
89444+#include <linux/capability.h>
89445+#include <linux/dcache.h>
89446+#include <asm/resource.h>
89447+
89448+/* Major status information */
89449+
89450+#define GR_VERSION "grsecurity 3.1"
89451+#define GRSECURITY_VERSION 0x3100
89452+
89453+enum {
89454+ GR_SHUTDOWN = 0,
89455+ GR_ENABLE = 1,
89456+ GR_SPROLE = 2,
89457+ GR_OLDRELOAD = 3,
89458+ GR_SEGVMOD = 4,
89459+ GR_STATUS = 5,
89460+ GR_UNSPROLE = 6,
89461+ GR_PASSSET = 7,
89462+ GR_SPROLEPAM = 8,
89463+ GR_RELOAD = 9,
89464+};
89465+
89466+/* Password setup definitions
89467+ * kernel/grhash.c */
89468+enum {
89469+ GR_PW_LEN = 128,
89470+ GR_SALT_LEN = 16,
89471+ GR_SHA_LEN = 32,
89472+};
89473+
89474+enum {
89475+ GR_SPROLE_LEN = 64,
89476+};
89477+
89478+enum {
89479+ GR_NO_GLOB = 0,
89480+ GR_REG_GLOB,
89481+ GR_CREATE_GLOB
89482+};
89483+
89484+#define GR_NLIMITS 32
89485+
89486+/* Begin Data Structures */
89487+
89488+struct sprole_pw {
89489+ unsigned char *rolename;
89490+ unsigned char salt[GR_SALT_LEN];
89491+ unsigned char sum[GR_SHA_LEN]; /* 256-bit SHA hash of the password */
89492+};
89493+
89494+struct name_entry {
89495+ __u32 key;
89496+ u64 inode;
89497+ dev_t device;
89498+ char *name;
89499+ __u16 len;
89500+ __u8 deleted;
89501+ struct name_entry *prev;
89502+ struct name_entry *next;
89503+};
89504+
89505+struct inodev_entry {
89506+ struct name_entry *nentry;
89507+ struct inodev_entry *prev;
89508+ struct inodev_entry *next;
89509+};
89510+
89511+struct acl_role_db {
89512+ struct acl_role_label **r_hash;
89513+ __u32 r_size;
89514+};
89515+
89516+struct inodev_db {
89517+ struct inodev_entry **i_hash;
89518+ __u32 i_size;
89519+};
89520+
89521+struct name_db {
89522+ struct name_entry **n_hash;
89523+ __u32 n_size;
89524+};
89525+
89526+struct crash_uid {
89527+ uid_t uid;
89528+ unsigned long expires;
89529+};
89530+
89531+struct gr_hash_struct {
89532+ void **table;
89533+ void **nametable;
89534+ void *first;
89535+ __u32 table_size;
89536+ __u32 used_size;
89537+ int type;
89538+};
89539+
89540+/* Userspace Grsecurity ACL data structures */
89541+
89542+struct acl_subject_label {
89543+ char *filename;
89544+ u64 inode;
89545+ dev_t device;
89546+ __u32 mode;
89547+ kernel_cap_t cap_mask;
89548+ kernel_cap_t cap_lower;
89549+ kernel_cap_t cap_invert_audit;
89550+
89551+ struct rlimit res[GR_NLIMITS];
89552+ __u32 resmask;
89553+
89554+ __u8 user_trans_type;
89555+ __u8 group_trans_type;
89556+ uid_t *user_transitions;
89557+ gid_t *group_transitions;
89558+ __u16 user_trans_num;
89559+ __u16 group_trans_num;
89560+
89561+ __u32 sock_families[2];
89562+ __u32 ip_proto[8];
89563+ __u32 ip_type;
89564+ struct acl_ip_label **ips;
89565+ __u32 ip_num;
89566+ __u32 inaddr_any_override;
89567+
89568+ __u32 crashes;
89569+ unsigned long expires;
89570+
89571+ struct acl_subject_label *parent_subject;
89572+ struct gr_hash_struct *hash;
89573+ struct acl_subject_label *prev;
89574+ struct acl_subject_label *next;
89575+
89576+ struct acl_object_label **obj_hash;
89577+ __u32 obj_hash_size;
89578+ __u16 pax_flags;
89579+};
89580+
89581+struct role_allowed_ip {
89582+ __u32 addr;
89583+ __u32 netmask;
89584+
89585+ struct role_allowed_ip *prev;
89586+ struct role_allowed_ip *next;
89587+};
89588+
89589+struct role_transition {
89590+ char *rolename;
89591+
89592+ struct role_transition *prev;
89593+ struct role_transition *next;
89594+};
89595+
89596+struct acl_role_label {
89597+ char *rolename;
89598+ uid_t uidgid;
89599+ __u16 roletype;
89600+
89601+ __u16 auth_attempts;
89602+ unsigned long expires;
89603+
89604+ struct acl_subject_label *root_label;
89605+ struct gr_hash_struct *hash;
89606+
89607+ struct acl_role_label *prev;
89608+ struct acl_role_label *next;
89609+
89610+ struct role_transition *transitions;
89611+ struct role_allowed_ip *allowed_ips;
89612+ uid_t *domain_children;
89613+ __u16 domain_child_num;
89614+
89615+ umode_t umask;
89616+
89617+ struct acl_subject_label **subj_hash;
89618+ __u32 subj_hash_size;
89619+};
89620+
89621+struct user_acl_role_db {
89622+ struct acl_role_label **r_table;
89623+ __u32 num_pointers; /* Number of allocations to track */
89624+ __u32 num_roles; /* Number of roles */
89625+ __u32 num_domain_children; /* Number of domain children */
89626+ __u32 num_subjects; /* Number of subjects */
89627+ __u32 num_objects; /* Number of objects */
89628+};
89629+
89630+struct acl_object_label {
89631+ char *filename;
89632+ u64 inode;
89633+ dev_t device;
89634+ __u32 mode;
89635+
89636+ struct acl_subject_label *nested;
89637+ struct acl_object_label *globbed;
89638+
89639+ /* next two structures not used */
89640+
89641+ struct acl_object_label *prev;
89642+ struct acl_object_label *next;
89643+};
89644+
89645+struct acl_ip_label {
89646+ char *iface;
89647+ __u32 addr;
89648+ __u32 netmask;
89649+ __u16 low, high;
89650+ __u8 mode;
89651+ __u32 type;
89652+ __u32 proto[8];
89653+
89654+ /* next two structures not used */
89655+
89656+ struct acl_ip_label *prev;
89657+ struct acl_ip_label *next;
89658+};
89659+
89660+struct gr_arg {
89661+ struct user_acl_role_db role_db;
89662+ unsigned char pw[GR_PW_LEN];
89663+ unsigned char salt[GR_SALT_LEN];
89664+ unsigned char sum[GR_SHA_LEN];
89665+ unsigned char sp_role[GR_SPROLE_LEN];
89666+ struct sprole_pw *sprole_pws;
89667+ dev_t segv_device;
89668+ u64 segv_inode;
89669+ uid_t segv_uid;
89670+ __u16 num_sprole_pws;
89671+ __u16 mode;
89672+};
89673+
89674+struct gr_arg_wrapper {
89675+ struct gr_arg *arg;
89676+ __u32 version;
89677+ __u32 size;
89678+};
89679+
89680+struct subject_map {
89681+ struct acl_subject_label *user;
89682+ struct acl_subject_label *kernel;
89683+ struct subject_map *prev;
89684+ struct subject_map *next;
89685+};
89686+
89687+struct acl_subj_map_db {
89688+ struct subject_map **s_hash;
89689+ __u32 s_size;
89690+};
89691+
89692+struct gr_policy_state {
89693+ struct sprole_pw **acl_special_roles;
89694+ __u16 num_sprole_pws;
89695+ struct acl_role_label *kernel_role;
89696+ struct acl_role_label *role_list;
89697+ struct acl_role_label *default_role;
89698+ struct acl_role_db acl_role_set;
89699+ struct acl_subj_map_db subj_map_set;
89700+ struct name_db name_set;
89701+ struct inodev_db inodev_set;
89702+};
89703+
89704+struct gr_alloc_state {
89705+ unsigned long alloc_stack_next;
89706+ unsigned long alloc_stack_size;
89707+ void **alloc_stack;
89708+};
89709+
89710+struct gr_reload_state {
89711+ struct gr_policy_state oldpolicy;
89712+ struct gr_alloc_state oldalloc;
89713+ struct gr_policy_state newpolicy;
89714+ struct gr_alloc_state newalloc;
89715+ struct gr_policy_state *oldpolicy_ptr;
89716+ struct gr_alloc_state *oldalloc_ptr;
89717+ unsigned char oldmode;
89718+};
89719+
89720+/* End Data Structures Section */
89721+
89722+/* Hash functions generated by empirical testing by Brad Spengler
89723+ Makes good use of the low bits of the inode. Generally 0-1 times
89724+ in loop for successful match. 0-3 for unsuccessful match.
89725+ Shift/add algorithm with modulus of table size and an XOR*/
89726+
89727+static __inline__ unsigned int
89728+gr_rhash(const uid_t uid, const __u16 type, const unsigned int sz)
89729+{
89730+ return ((((uid + type) << (16 + type)) ^ uid) % sz);
89731+}
89732+
89733+ static __inline__ unsigned int
89734+gr_shash(const struct acl_subject_label *userp, const unsigned int sz)
89735+{
89736+ return ((const unsigned long)userp % sz);
89737+}
89738+
89739+static __inline__ unsigned int
89740+gr_fhash(const u64 ino, const dev_t dev, const unsigned int sz)
89741+{
89742+ unsigned int rem;
89743+ div_u64_rem((ino + dev) ^ ((ino << 13) + (ino << 23) + (dev << 9)), sz, &rem);
89744+ return rem;
89745+}
89746+
89747+static __inline__ unsigned int
89748+gr_nhash(const char *name, const __u16 len, const unsigned int sz)
89749+{
89750+ return full_name_hash((const unsigned char *)name, len) % sz;
89751+}
89752+
89753+#define FOR_EACH_SUBJECT_START(role,subj,iter) \
89754+ subj = NULL; \
89755+ iter = 0; \
89756+ while (iter < role->subj_hash_size) { \
89757+ if (subj == NULL) \
89758+ subj = role->subj_hash[iter]; \
89759+ if (subj == NULL) { \
89760+ iter++; \
89761+ continue; \
89762+ }
89763+
89764+#define FOR_EACH_SUBJECT_END(subj,iter) \
89765+ subj = subj->next; \
89766+ if (subj == NULL) \
89767+ iter++; \
89768+ }
89769+
89770+
89771+#define FOR_EACH_NESTED_SUBJECT_START(role,subj) \
89772+ subj = role->hash->first; \
89773+ while (subj != NULL) {
89774+
89775+#define FOR_EACH_NESTED_SUBJECT_END(subj) \
89776+ subj = subj->next; \
89777+ }
89778+
89779+#endif
89780+
89781diff --git a/include/linux/gracl_compat.h b/include/linux/gracl_compat.h
89782new file mode 100644
89783index 0000000..af64092
89784--- /dev/null
89785+++ b/include/linux/gracl_compat.h
89786@@ -0,0 +1,156 @@
89787+#ifndef GR_ACL_COMPAT_H
89788+#define GR_ACL_COMPAT_H
89789+
89790+#include <linux/resource.h>
89791+#include <asm/resource.h>
89792+
89793+struct sprole_pw_compat {
89794+ compat_uptr_t rolename;
89795+ unsigned char salt[GR_SALT_LEN];
89796+ unsigned char sum[GR_SHA_LEN];
89797+};
89798+
89799+struct gr_hash_struct_compat {
89800+ compat_uptr_t table;
89801+ compat_uptr_t nametable;
89802+ compat_uptr_t first;
89803+ __u32 table_size;
89804+ __u32 used_size;
89805+ int type;
89806+};
89807+
89808+struct acl_subject_label_compat {
89809+ compat_uptr_t filename;
89810+ compat_u64 inode;
89811+ __u32 device;
89812+ __u32 mode;
89813+ kernel_cap_t cap_mask;
89814+ kernel_cap_t cap_lower;
89815+ kernel_cap_t cap_invert_audit;
89816+
89817+ struct compat_rlimit res[GR_NLIMITS];
89818+ __u32 resmask;
89819+
89820+ __u8 user_trans_type;
89821+ __u8 group_trans_type;
89822+ compat_uptr_t user_transitions;
89823+ compat_uptr_t group_transitions;
89824+ __u16 user_trans_num;
89825+ __u16 group_trans_num;
89826+
89827+ __u32 sock_families[2];
89828+ __u32 ip_proto[8];
89829+ __u32 ip_type;
89830+ compat_uptr_t ips;
89831+ __u32 ip_num;
89832+ __u32 inaddr_any_override;
89833+
89834+ __u32 crashes;
89835+ compat_ulong_t expires;
89836+
89837+ compat_uptr_t parent_subject;
89838+ compat_uptr_t hash;
89839+ compat_uptr_t prev;
89840+ compat_uptr_t next;
89841+
89842+ compat_uptr_t obj_hash;
89843+ __u32 obj_hash_size;
89844+ __u16 pax_flags;
89845+};
89846+
89847+struct role_allowed_ip_compat {
89848+ __u32 addr;
89849+ __u32 netmask;
89850+
89851+ compat_uptr_t prev;
89852+ compat_uptr_t next;
89853+};
89854+
89855+struct role_transition_compat {
89856+ compat_uptr_t rolename;
89857+
89858+ compat_uptr_t prev;
89859+ compat_uptr_t next;
89860+};
89861+
89862+struct acl_role_label_compat {
89863+ compat_uptr_t rolename;
89864+ uid_t uidgid;
89865+ __u16 roletype;
89866+
89867+ __u16 auth_attempts;
89868+ compat_ulong_t expires;
89869+
89870+ compat_uptr_t root_label;
89871+ compat_uptr_t hash;
89872+
89873+ compat_uptr_t prev;
89874+ compat_uptr_t next;
89875+
89876+ compat_uptr_t transitions;
89877+ compat_uptr_t allowed_ips;
89878+ compat_uptr_t domain_children;
89879+ __u16 domain_child_num;
89880+
89881+ umode_t umask;
89882+
89883+ compat_uptr_t subj_hash;
89884+ __u32 subj_hash_size;
89885+};
89886+
89887+struct user_acl_role_db_compat {
89888+ compat_uptr_t r_table;
89889+ __u32 num_pointers;
89890+ __u32 num_roles;
89891+ __u32 num_domain_children;
89892+ __u32 num_subjects;
89893+ __u32 num_objects;
89894+};
89895+
89896+struct acl_object_label_compat {
89897+ compat_uptr_t filename;
89898+ compat_u64 inode;
89899+ __u32 device;
89900+ __u32 mode;
89901+
89902+ compat_uptr_t nested;
89903+ compat_uptr_t globbed;
89904+
89905+ compat_uptr_t prev;
89906+ compat_uptr_t next;
89907+};
89908+
89909+struct acl_ip_label_compat {
89910+ compat_uptr_t iface;
89911+ __u32 addr;
89912+ __u32 netmask;
89913+ __u16 low, high;
89914+ __u8 mode;
89915+ __u32 type;
89916+ __u32 proto[8];
89917+
89918+ compat_uptr_t prev;
89919+ compat_uptr_t next;
89920+};
89921+
89922+struct gr_arg_compat {
89923+ struct user_acl_role_db_compat role_db;
89924+ unsigned char pw[GR_PW_LEN];
89925+ unsigned char salt[GR_SALT_LEN];
89926+ unsigned char sum[GR_SHA_LEN];
89927+ unsigned char sp_role[GR_SPROLE_LEN];
89928+ compat_uptr_t sprole_pws;
89929+ __u32 segv_device;
89930+ compat_u64 segv_inode;
89931+ uid_t segv_uid;
89932+ __u16 num_sprole_pws;
89933+ __u16 mode;
89934+};
89935+
89936+struct gr_arg_wrapper_compat {
89937+ compat_uptr_t arg;
89938+ __u32 version;
89939+ __u32 size;
89940+};
89941+
89942+#endif
89943diff --git a/include/linux/gralloc.h b/include/linux/gralloc.h
89944new file mode 100644
89945index 0000000..323ecf2
89946--- /dev/null
89947+++ b/include/linux/gralloc.h
89948@@ -0,0 +1,9 @@
89949+#ifndef __GRALLOC_H
89950+#define __GRALLOC_H
89951+
89952+void acl_free_all(void);
89953+int acl_alloc_stack_init(unsigned long size);
89954+void *acl_alloc(unsigned long len);
89955+void *acl_alloc_num(unsigned long num, unsigned long len);
89956+
89957+#endif
89958diff --git a/include/linux/grdefs.h b/include/linux/grdefs.h
89959new file mode 100644
89960index 0000000..be66033
89961--- /dev/null
89962+++ b/include/linux/grdefs.h
89963@@ -0,0 +1,140 @@
89964+#ifndef GRDEFS_H
89965+#define GRDEFS_H
89966+
89967+/* Begin grsecurity status declarations */
89968+
89969+enum {
89970+ GR_READY = 0x01,
89971+ GR_STATUS_INIT = 0x00 // disabled state
89972+};
89973+
89974+/* Begin ACL declarations */
89975+
89976+/* Role flags */
89977+
89978+enum {
89979+ GR_ROLE_USER = 0x0001,
89980+ GR_ROLE_GROUP = 0x0002,
89981+ GR_ROLE_DEFAULT = 0x0004,
89982+ GR_ROLE_SPECIAL = 0x0008,
89983+ GR_ROLE_AUTH = 0x0010,
89984+ GR_ROLE_NOPW = 0x0020,
89985+ GR_ROLE_GOD = 0x0040,
89986+ GR_ROLE_LEARN = 0x0080,
89987+ GR_ROLE_TPE = 0x0100,
89988+ GR_ROLE_DOMAIN = 0x0200,
89989+ GR_ROLE_PAM = 0x0400,
89990+ GR_ROLE_PERSIST = 0x0800
89991+};
89992+
89993+/* ACL Subject and Object mode flags */
89994+enum {
89995+ GR_DELETED = 0x80000000
89996+};
89997+
89998+/* ACL Object-only mode flags */
89999+enum {
90000+ GR_READ = 0x00000001,
90001+ GR_APPEND = 0x00000002,
90002+ GR_WRITE = 0x00000004,
90003+ GR_EXEC = 0x00000008,
90004+ GR_FIND = 0x00000010,
90005+ GR_INHERIT = 0x00000020,
90006+ GR_SETID = 0x00000040,
90007+ GR_CREATE = 0x00000080,
90008+ GR_DELETE = 0x00000100,
90009+ GR_LINK = 0x00000200,
90010+ GR_AUDIT_READ = 0x00000400,
90011+ GR_AUDIT_APPEND = 0x00000800,
90012+ GR_AUDIT_WRITE = 0x00001000,
90013+ GR_AUDIT_EXEC = 0x00002000,
90014+ GR_AUDIT_FIND = 0x00004000,
90015+ GR_AUDIT_INHERIT= 0x00008000,
90016+ GR_AUDIT_SETID = 0x00010000,
90017+ GR_AUDIT_CREATE = 0x00020000,
90018+ GR_AUDIT_DELETE = 0x00040000,
90019+ GR_AUDIT_LINK = 0x00080000,
90020+ GR_PTRACERD = 0x00100000,
90021+ GR_NOPTRACE = 0x00200000,
90022+ GR_SUPPRESS = 0x00400000,
90023+ GR_NOLEARN = 0x00800000,
90024+ GR_INIT_TRANSFER= 0x01000000
90025+};
90026+
90027+#define GR_AUDITS (GR_AUDIT_READ | GR_AUDIT_WRITE | GR_AUDIT_APPEND | GR_AUDIT_EXEC | \
90028+ GR_AUDIT_FIND | GR_AUDIT_INHERIT | GR_AUDIT_SETID | \
90029+ GR_AUDIT_CREATE | GR_AUDIT_DELETE | GR_AUDIT_LINK)
90030+
90031+/* ACL subject-only mode flags */
90032+enum {
90033+ GR_KILL = 0x00000001,
90034+ GR_VIEW = 0x00000002,
90035+ GR_PROTECTED = 0x00000004,
90036+ GR_LEARN = 0x00000008,
90037+ GR_OVERRIDE = 0x00000010,
90038+ /* just a placeholder, this mode is only used in userspace */
90039+ GR_DUMMY = 0x00000020,
90040+ GR_PROTSHM = 0x00000040,
90041+ GR_KILLPROC = 0x00000080,
90042+ GR_KILLIPPROC = 0x00000100,
90043+ /* just a placeholder, this mode is only used in userspace */
90044+ GR_NOTROJAN = 0x00000200,
90045+ GR_PROTPROCFD = 0x00000400,
90046+ GR_PROCACCT = 0x00000800,
90047+ GR_RELAXPTRACE = 0x00001000,
90048+ //GR_NESTED = 0x00002000,
90049+ GR_INHERITLEARN = 0x00004000,
90050+ GR_PROCFIND = 0x00008000,
90051+ GR_POVERRIDE = 0x00010000,
90052+ GR_KERNELAUTH = 0x00020000,
90053+ GR_ATSECURE = 0x00040000,
90054+ GR_SHMEXEC = 0x00080000
90055+};
90056+
90057+enum {
90058+ GR_PAX_ENABLE_SEGMEXEC = 0x0001,
90059+ GR_PAX_ENABLE_PAGEEXEC = 0x0002,
90060+ GR_PAX_ENABLE_MPROTECT = 0x0004,
90061+ GR_PAX_ENABLE_RANDMMAP = 0x0008,
90062+ GR_PAX_ENABLE_EMUTRAMP = 0x0010,
90063+ GR_PAX_DISABLE_SEGMEXEC = 0x0100,
90064+ GR_PAX_DISABLE_PAGEEXEC = 0x0200,
90065+ GR_PAX_DISABLE_MPROTECT = 0x0400,
90066+ GR_PAX_DISABLE_RANDMMAP = 0x0800,
90067+ GR_PAX_DISABLE_EMUTRAMP = 0x1000,
90068+};
90069+
90070+enum {
90071+ GR_ID_USER = 0x01,
90072+ GR_ID_GROUP = 0x02,
90073+};
90074+
90075+enum {
90076+ GR_ID_ALLOW = 0x01,
90077+ GR_ID_DENY = 0x02,
90078+};
90079+
90080+#define GR_CRASH_RES 31
90081+#define GR_UIDTABLE_MAX 500
90082+
90083+/* begin resource learning section */
90084+enum {
90085+ GR_RLIM_CPU_BUMP = 60,
90086+ GR_RLIM_FSIZE_BUMP = 50000,
90087+ GR_RLIM_DATA_BUMP = 10000,
90088+ GR_RLIM_STACK_BUMP = 1000,
90089+ GR_RLIM_CORE_BUMP = 10000,
90090+ GR_RLIM_RSS_BUMP = 500000,
90091+ GR_RLIM_NPROC_BUMP = 1,
90092+ GR_RLIM_NOFILE_BUMP = 5,
90093+ GR_RLIM_MEMLOCK_BUMP = 50000,
90094+ GR_RLIM_AS_BUMP = 500000,
90095+ GR_RLIM_LOCKS_BUMP = 2,
90096+ GR_RLIM_SIGPENDING_BUMP = 5,
90097+ GR_RLIM_MSGQUEUE_BUMP = 10000,
90098+ GR_RLIM_NICE_BUMP = 1,
90099+ GR_RLIM_RTPRIO_BUMP = 1,
90100+ GR_RLIM_RTTIME_BUMP = 1000000
90101+};
90102+
90103+#endif
90104diff --git a/include/linux/grinternal.h b/include/linux/grinternal.h
90105new file mode 100644
90106index 0000000..fb1de5d
90107--- /dev/null
90108+++ b/include/linux/grinternal.h
90109@@ -0,0 +1,230 @@
90110+#ifndef __GRINTERNAL_H
90111+#define __GRINTERNAL_H
90112+
90113+#ifdef CONFIG_GRKERNSEC
90114+
90115+#include <linux/fs.h>
90116+#include <linux/mnt_namespace.h>
90117+#include <linux/nsproxy.h>
90118+#include <linux/gracl.h>
90119+#include <linux/grdefs.h>
90120+#include <linux/grmsg.h>
90121+
90122+void gr_add_learn_entry(const char *fmt, ...)
90123+ __attribute__ ((format (printf, 1, 2)));
90124+__u32 gr_search_file(const struct dentry *dentry, const __u32 mode,
90125+ const struct vfsmount *mnt);
90126+__u32 gr_check_create(const struct dentry *new_dentry,
90127+ const struct dentry *parent,
90128+ const struct vfsmount *mnt, const __u32 mode);
90129+int gr_check_protected_task(const struct task_struct *task);
90130+__u32 to_gr_audit(const __u32 reqmode);
90131+int gr_set_acls(const int type);
90132+int gr_acl_is_enabled(void);
90133+char gr_roletype_to_char(void);
90134+
90135+void gr_handle_alertkill(struct task_struct *task);
90136+char *gr_to_filename(const struct dentry *dentry,
90137+ const struct vfsmount *mnt);
90138+char *gr_to_filename1(const struct dentry *dentry,
90139+ const struct vfsmount *mnt);
90140+char *gr_to_filename2(const struct dentry *dentry,
90141+ const struct vfsmount *mnt);
90142+char *gr_to_filename3(const struct dentry *dentry,
90143+ const struct vfsmount *mnt);
90144+
90145+extern int grsec_enable_ptrace_readexec;
90146+extern int grsec_enable_harden_ptrace;
90147+extern int grsec_enable_link;
90148+extern int grsec_enable_fifo;
90149+extern int grsec_enable_execve;
90150+extern int grsec_enable_shm;
90151+extern int grsec_enable_execlog;
90152+extern int grsec_enable_signal;
90153+extern int grsec_enable_audit_ptrace;
90154+extern int grsec_enable_forkfail;
90155+extern int grsec_enable_time;
90156+extern int grsec_enable_rofs;
90157+extern int grsec_deny_new_usb;
90158+extern int grsec_enable_chroot_shmat;
90159+extern int grsec_enable_chroot_mount;
90160+extern int grsec_enable_chroot_double;
90161+extern int grsec_enable_chroot_pivot;
90162+extern int grsec_enable_chroot_chdir;
90163+extern int grsec_enable_chroot_chmod;
90164+extern int grsec_enable_chroot_mknod;
90165+extern int grsec_enable_chroot_fchdir;
90166+extern int grsec_enable_chroot_nice;
90167+extern int grsec_enable_chroot_execlog;
90168+extern int grsec_enable_chroot_caps;
90169+extern int grsec_enable_chroot_rename;
90170+extern int grsec_enable_chroot_sysctl;
90171+extern int grsec_enable_chroot_unix;
90172+extern int grsec_enable_symlinkown;
90173+extern kgid_t grsec_symlinkown_gid;
90174+extern int grsec_enable_tpe;
90175+extern kgid_t grsec_tpe_gid;
90176+extern int grsec_enable_tpe_all;
90177+extern int grsec_enable_tpe_invert;
90178+extern int grsec_enable_socket_all;
90179+extern kgid_t grsec_socket_all_gid;
90180+extern int grsec_enable_socket_client;
90181+extern kgid_t grsec_socket_client_gid;
90182+extern int grsec_enable_socket_server;
90183+extern kgid_t grsec_socket_server_gid;
90184+extern kgid_t grsec_audit_gid;
90185+extern int grsec_enable_group;
90186+extern int grsec_enable_log_rwxmaps;
90187+extern int grsec_enable_mount;
90188+extern int grsec_enable_chdir;
90189+extern int grsec_resource_logging;
90190+extern int grsec_enable_blackhole;
90191+extern int grsec_lastack_retries;
90192+extern int grsec_enable_brute;
90193+extern int grsec_enable_harden_ipc;
90194+extern int grsec_lock;
90195+
90196+extern spinlock_t grsec_alert_lock;
90197+extern unsigned long grsec_alert_wtime;
90198+extern unsigned long grsec_alert_fyet;
90199+
90200+extern spinlock_t grsec_audit_lock;
90201+
90202+extern rwlock_t grsec_exec_file_lock;
90203+
90204+#define gr_task_fullpath(tsk) ((tsk)->exec_file ? \
90205+ gr_to_filename2((tsk)->exec_file->f_path.dentry, \
90206+ (tsk)->exec_file->f_path.mnt) : "/")
90207+
90208+#define gr_parent_task_fullpath(tsk) ((tsk)->real_parent->exec_file ? \
90209+ gr_to_filename3((tsk)->real_parent->exec_file->f_path.dentry, \
90210+ (tsk)->real_parent->exec_file->f_path.mnt) : "/")
90211+
90212+#define gr_task_fullpath0(tsk) ((tsk)->exec_file ? \
90213+ gr_to_filename((tsk)->exec_file->f_path.dentry, \
90214+ (tsk)->exec_file->f_path.mnt) : "/")
90215+
90216+#define gr_parent_task_fullpath0(tsk) ((tsk)->real_parent->exec_file ? \
90217+ gr_to_filename1((tsk)->real_parent->exec_file->f_path.dentry, \
90218+ (tsk)->real_parent->exec_file->f_path.mnt) : "/")
90219+
90220+#define proc_is_chrooted(tsk_a) ((tsk_a)->gr_is_chrooted)
90221+
90222+#define have_same_root(tsk_a,tsk_b) ((tsk_a)->gr_chroot_dentry == (tsk_b)->gr_chroot_dentry)
90223+
90224+static inline bool gr_is_same_file(const struct file *file1, const struct file *file2)
90225+{
90226+ if (file1 && file2) {
90227+ const struct inode *inode1 = file1->f_path.dentry->d_inode;
90228+ const struct inode *inode2 = file2->f_path.dentry->d_inode;
90229+ if (inode1->i_ino == inode2->i_ino && inode1->i_sb->s_dev == inode2->i_sb->s_dev)
90230+ return true;
90231+ }
90232+
90233+ return false;
90234+}
90235+
90236+#define GR_CHROOT_CAPS {{ \
90237+ CAP_TO_MASK(CAP_LINUX_IMMUTABLE) | CAP_TO_MASK(CAP_NET_ADMIN) | \
90238+ CAP_TO_MASK(CAP_SYS_MODULE) | CAP_TO_MASK(CAP_SYS_RAWIO) | \
90239+ CAP_TO_MASK(CAP_SYS_PACCT) | CAP_TO_MASK(CAP_SYS_ADMIN) | \
90240+ CAP_TO_MASK(CAP_SYS_BOOT) | CAP_TO_MASK(CAP_SYS_TIME) | \
90241+ CAP_TO_MASK(CAP_NET_RAW) | CAP_TO_MASK(CAP_SYS_TTY_CONFIG) | \
90242+ CAP_TO_MASK(CAP_IPC_OWNER) | CAP_TO_MASK(CAP_SETFCAP), \
90243+ CAP_TO_MASK(CAP_SYSLOG) | CAP_TO_MASK(CAP_MAC_ADMIN) }}
90244+
90245+#define security_learn(normal_msg,args...) \
90246+({ \
90247+ read_lock(&grsec_exec_file_lock); \
90248+ gr_add_learn_entry(normal_msg "\n", ## args); \
90249+ read_unlock(&grsec_exec_file_lock); \
90250+})
90251+
90252+enum {
90253+ GR_DO_AUDIT,
90254+ GR_DONT_AUDIT,
90255+ /* used for non-audit messages that we shouldn't kill the task on */
90256+ GR_DONT_AUDIT_GOOD
90257+};
90258+
90259+enum {
90260+ GR_TTYSNIFF,
90261+ GR_RBAC,
90262+ GR_RBAC_STR,
90263+ GR_STR_RBAC,
90264+ GR_RBAC_MODE2,
90265+ GR_RBAC_MODE3,
90266+ GR_FILENAME,
90267+ GR_SYSCTL_HIDDEN,
90268+ GR_NOARGS,
90269+ GR_ONE_INT,
90270+ GR_ONE_INT_TWO_STR,
90271+ GR_ONE_STR,
90272+ GR_STR_INT,
90273+ GR_TWO_STR_INT,
90274+ GR_TWO_INT,
90275+ GR_TWO_U64,
90276+ GR_THREE_INT,
90277+ GR_FIVE_INT_TWO_STR,
90278+ GR_TWO_STR,
90279+ GR_THREE_STR,
90280+ GR_FOUR_STR,
90281+ GR_STR_FILENAME,
90282+ GR_FILENAME_STR,
90283+ GR_FILENAME_TWO_INT,
90284+ GR_FILENAME_TWO_INT_STR,
90285+ GR_TEXTREL,
90286+ GR_PTRACE,
90287+ GR_RESOURCE,
90288+ GR_CAP,
90289+ GR_SIG,
90290+ GR_SIG2,
90291+ GR_CRASH1,
90292+ GR_CRASH2,
90293+ GR_PSACCT,
90294+ GR_RWXMAP,
90295+ GR_RWXMAPVMA
90296+};
90297+
90298+#define gr_log_hidden_sysctl(audit, msg, str) gr_log_varargs(audit, msg, GR_SYSCTL_HIDDEN, str)
90299+#define gr_log_ttysniff(audit, msg, task) gr_log_varargs(audit, msg, GR_TTYSNIFF, task)
90300+#define gr_log_fs_rbac_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_RBAC, dentry, mnt)
90301+#define gr_log_fs_rbac_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_RBAC_STR, dentry, mnt, str)
90302+#define gr_log_fs_str_rbac(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_RBAC, str, dentry, mnt)
90303+#define gr_log_fs_rbac_mode2(audit, msg, dentry, mnt, str1, str2) gr_log_varargs(audit, msg, GR_RBAC_MODE2, dentry, mnt, str1, str2)
90304+#define gr_log_fs_rbac_mode3(audit, msg, dentry, mnt, str1, str2, str3) gr_log_varargs(audit, msg, GR_RBAC_MODE3, dentry, mnt, str1, str2, str3)
90305+#define gr_log_fs_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_FILENAME, dentry, mnt)
90306+#define gr_log_noargs(audit, msg) gr_log_varargs(audit, msg, GR_NOARGS)
90307+#define gr_log_int(audit, msg, num) gr_log_varargs(audit, msg, GR_ONE_INT, num)
90308+#define gr_log_int_str2(audit, msg, num, str1, str2) gr_log_varargs(audit, msg, GR_ONE_INT_TWO_STR, num, str1, str2)
90309+#define gr_log_str(audit, msg, str) gr_log_varargs(audit, msg, GR_ONE_STR, str)
90310+#define gr_log_str_int(audit, msg, str, num) gr_log_varargs(audit, msg, GR_STR_INT, str, num)
90311+#define gr_log_int_int(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_INT, num1, num2)
90312+#define gr_log_two_u64(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_U64, num1, num2)
90313+#define gr_log_int3(audit, msg, num1, num2, num3) gr_log_varargs(audit, msg, GR_THREE_INT, num1, num2, num3)
90314+#define gr_log_int5_str2(audit, msg, num1, num2, str1, str2) gr_log_varargs(audit, msg, GR_FIVE_INT_TWO_STR, num1, num2, str1, str2)
90315+#define gr_log_str_str(audit, msg, str1, str2) gr_log_varargs(audit, msg, GR_TWO_STR, str1, str2)
90316+#define gr_log_str2_int(audit, msg, str1, str2, num) gr_log_varargs(audit, msg, GR_TWO_STR_INT, str1, str2, num)
90317+#define gr_log_str3(audit, msg, str1, str2, str3) gr_log_varargs(audit, msg, GR_THREE_STR, str1, str2, str3)
90318+#define gr_log_str4(audit, msg, str1, str2, str3, str4) gr_log_varargs(audit, msg, GR_FOUR_STR, str1, str2, str3, str4)
90319+#define gr_log_str_fs(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_FILENAME, str, dentry, mnt)
90320+#define gr_log_fs_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_FILENAME_STR, dentry, mnt, str)
90321+#define gr_log_fs_int2(audit, msg, dentry, mnt, num1, num2) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT, dentry, mnt, num1, num2)
90322+#define gr_log_fs_int2_str(audit, msg, dentry, mnt, num1, num2, str) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT_STR, dentry, mnt, num1, num2, str)
90323+#define gr_log_textrel_ulong_ulong(audit, msg, file, ulong1, ulong2) gr_log_varargs(audit, msg, GR_TEXTREL, file, ulong1, ulong2)
90324+#define gr_log_ptrace(audit, msg, task) gr_log_varargs(audit, msg, GR_PTRACE, task)
90325+#define gr_log_res_ulong2_str(audit, msg, task, ulong1, str, ulong2) gr_log_varargs(audit, msg, GR_RESOURCE, task, ulong1, str, ulong2)
90326+#define gr_log_cap(audit, msg, task, str) gr_log_varargs(audit, msg, GR_CAP, task, str)
90327+#define gr_log_sig_addr(audit, msg, str, addr) gr_log_varargs(audit, msg, GR_SIG, str, addr)
90328+#define gr_log_sig_task(audit, msg, task, num) gr_log_varargs(audit, msg, GR_SIG2, task, num)
90329+#define gr_log_crash1(audit, msg, task, ulong) gr_log_varargs(audit, msg, GR_CRASH1, task, ulong)
90330+#define gr_log_crash2(audit, msg, task, ulong1) gr_log_varargs(audit, msg, GR_CRASH2, task, ulong1)
90331+#define gr_log_procacct(audit, msg, task, num1, num2, num3, num4, num5, num6, num7, num8, num9) gr_log_varargs(audit, msg, GR_PSACCT, task, num1, num2, num3, num4, num5, num6, num7, num8, num9)
90332+#define gr_log_rwxmap(audit, msg, str) gr_log_varargs(audit, msg, GR_RWXMAP, str)
90333+#define gr_log_rwxmap_vma(audit, msg, str) gr_log_varargs(audit, msg, GR_RWXMAPVMA, str)
90334+
90335+void gr_log_varargs(int audit, const char *msg, int argtypes, ...);
90336+
90337+#endif
90338+
90339+#endif
90340diff --git a/include/linux/grmsg.h b/include/linux/grmsg.h
90341new file mode 100644
90342index 0000000..26ef560
90343--- /dev/null
90344+++ b/include/linux/grmsg.h
90345@@ -0,0 +1,118 @@
90346+#define DEFAULTSECMSG "%.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u, parent %.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u"
90347+#define GR_ACL_PROCACCT_MSG "%.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u run time:[%ud %uh %um %us] cpu time:[%ud %uh %um %us] %s with exit code %ld, parent %.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u"
90348+#define GR_PTRACE_ACL_MSG "denied ptrace of %.950s(%.16s:%d) by "
90349+#define GR_STOPMOD_MSG "denied modification of module state by "
90350+#define GR_ROFS_BLOCKWRITE_MSG "denied write to block device %.950s by "
90351+#define GR_ROFS_MOUNT_MSG "denied writable mount of %.950s by "
90352+#define GR_IOPERM_MSG "denied use of ioperm() by "
90353+#define GR_IOPL_MSG "denied use of iopl() by "
90354+#define GR_SHMAT_ACL_MSG "denied attach of shared memory of UID %u, PID %d, ID %u by "
90355+#define GR_UNIX_CHROOT_MSG "denied connect() to abstract AF_UNIX socket outside of chroot by "
90356+#define GR_SHMAT_CHROOT_MSG "denied attach of shared memory outside of chroot by "
90357+#define GR_MEM_READWRITE_MSG "denied access of range %Lx -> %Lx in /dev/mem by "
90358+#define GR_SYMLINK_MSG "not following symlink %.950s owned by %d.%d by "
90359+#define GR_LEARN_AUDIT_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%lu\t%lu\t%.4095s\t%lu\t%pI4"
90360+#define GR_ID_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%c\t%d\t%d\t%d\t%pI4"
90361+#define GR_HIDDEN_ACL_MSG "%s access to hidden file %.950s by "
90362+#define GR_OPEN_ACL_MSG "%s open of %.950s for%s%s by "
90363+#define GR_CREATE_ACL_MSG "%s create of %.950s for%s%s by "
90364+#define GR_FIFO_MSG "denied writing FIFO %.950s of %d.%d by "
90365+#define GR_MKNOD_CHROOT_MSG "denied mknod of %.950s from chroot by "
90366+#define GR_MKNOD_ACL_MSG "%s mknod of %.950s by "
90367+#define GR_UNIXCONNECT_ACL_MSG "%s connect() to the unix domain socket %.950s by "
90368+#define GR_TTYSNIFF_ACL_MSG "terminal being sniffed by IP:%pI4 %.480s[%.16s:%d], parent %.480s[%.16s:%d] against "
90369+#define GR_MKDIR_ACL_MSG "%s mkdir of %.950s by "
90370+#define GR_RMDIR_ACL_MSG "%s rmdir of %.950s by "
90371+#define GR_UNLINK_ACL_MSG "%s unlink of %.950s by "
90372+#define GR_SYMLINK_ACL_MSG "%s symlink from %.480s to %.480s by "
90373+#define GR_HARDLINK_MSG "denied hardlink of %.930s (owned by %d.%d) to %.30s for "
90374+#define GR_LINK_ACL_MSG "%s link of %.480s to %.480s by "
90375+#define GR_INHERIT_ACL_MSG "successful inherit of %.480s's ACL for %.480s by "
90376+#define GR_RENAME_ACL_MSG "%s rename of %.480s to %.480s by "
90377+#define GR_UNSAFESHARE_EXEC_ACL_MSG "denied exec with cloned fs of %.950s by "
90378+#define GR_PTRACE_EXEC_ACL_MSG "denied ptrace of %.950s by "
90379+#define GR_EXEC_ACL_MSG "%s execution of %.950s by "
90380+#define GR_EXEC_TPE_MSG "denied untrusted exec (due to %.70s) of %.950s by "
90381+#define GR_SEGVSTART_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning uid %u from login for %lu seconds"
90382+#define GR_SEGVNOSUID_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning execution for %lu seconds"
90383+#define GR_MOUNT_CHROOT_MSG "denied mount of %.256s as %.930s from chroot by "
90384+#define GR_PIVOT_CHROOT_MSG "denied pivot_root from chroot by "
90385+#define GR_TRUNCATE_ACL_MSG "%s truncate of %.950s by "
90386+#define GR_ATIME_ACL_MSG "%s access time change of %.950s by "
90387+#define GR_ACCESS_ACL_MSG "%s access of %.950s for%s%s%s by "
90388+#define GR_CHROOT_CHROOT_MSG "denied double chroot to %.950s by "
90389+#define GR_CHROOT_RENAME_MSG "denied bad rename of %.950s out of a chroot by "
90390+#define GR_CHMOD_CHROOT_MSG "denied chmod +s of %.950s by "
90391+#define GR_CHMOD_ACL_MSG "%s chmod of %.950s by "
90392+#define GR_CHROOT_FCHDIR_MSG "denied fchdir outside of chroot to %.950s by "
90393+#define GR_CHROOT_FHANDLE_MSG "denied use of file handles inside chroot by "
90394+#define GR_CHOWN_ACL_MSG "%s chown of %.950s by "
90395+#define GR_SETXATTR_ACL_MSG "%s setting extended attribute of %.950s by "
90396+#define GR_REMOVEXATTR_ACL_MSG "%s removing extended attribute of %.950s by "
90397+#define GR_WRITLIB_ACL_MSG "denied load of writable library %.950s by "
90398+#define GR_INITF_ACL_MSG "init_variables() failed %s by "
90399+#define GR_DISABLED_ACL_MSG "Error loading %s, trying to run kernel with acls disabled. To disable acls at startup use <kernel image name> gracl=off from your boot loader"
90400+#define GR_DEV_ACL_MSG "/dev/grsec: %d bytes sent %d required, being fed garbage by "
90401+#define GR_SHUTS_ACL_MSG "shutdown auth success for "
90402+#define GR_SHUTF_ACL_MSG "shutdown auth failure for "
90403+#define GR_SHUTI_ACL_MSG "ignoring shutdown for disabled RBAC system for "
90404+#define GR_SEGVMODS_ACL_MSG "segvmod auth success for "
90405+#define GR_SEGVMODF_ACL_MSG "segvmod auth failure for "
90406+#define GR_SEGVMODI_ACL_MSG "ignoring segvmod for disabled RBAC system for "
90407+#define GR_ENABLE_ACL_MSG "%s RBAC system loaded by "
90408+#define GR_ENABLEF_ACL_MSG "unable to load %s for "
90409+#define GR_RELOADI_ACL_MSG "ignoring reload request for disabled RBAC system"
90410+#define GR_RELOAD_ACL_MSG "%s RBAC system reloaded by "
90411+#define GR_RELOADF_ACL_MSG "failed reload of %s for "
90412+#define GR_SPROLEI_ACL_MSG "ignoring change to special role for disabled RBAC system for "
90413+#define GR_SPROLES_ACL_MSG "successful change to special role %s (id %d) by "
90414+#define GR_SPROLEL_ACL_MSG "special role %s (id %d) exited by "
90415+#define GR_SPROLEF_ACL_MSG "special role %s failure for "
90416+#define GR_UNSPROLEI_ACL_MSG "ignoring unauth of special role for disabled RBAC system for "
90417+#define GR_UNSPROLES_ACL_MSG "successful unauth of special role %s (id %d) by "
90418+#define GR_INVMODE_ACL_MSG "invalid mode %d by "
90419+#define GR_PRIORITY_CHROOT_MSG "denied priority change of process (%.16s:%d) by "
90420+#define GR_FAILFORK_MSG "failed fork with errno %s by "
90421+#define GR_NICE_CHROOT_MSG "denied priority change by "
90422+#define GR_UNISIGLOG_MSG "%.32s occurred at %p in "
90423+#define GR_DUALSIGLOG_MSG "signal %d sent to " DEFAULTSECMSG " by "
90424+#define GR_SIG_ACL_MSG "denied send of signal %d to protected task " DEFAULTSECMSG " by "
90425+#define GR_SYSCTL_MSG "denied modification of grsecurity sysctl value : %.32s by "
90426+#define GR_SYSCTL_ACL_MSG "%s sysctl of %.950s for%s%s by "
90427+#define GR_TIME_MSG "time set by "
90428+#define GR_DEFACL_MSG "fatal: unable to find subject for (%.16s:%d), loaded by "
90429+#define GR_MMAP_ACL_MSG "%s executable mmap of %.950s by "
90430+#define GR_MPROTECT_ACL_MSG "%s executable mprotect of %.950s by "
90431+#define GR_SOCK_MSG "denied socket(%.16s,%.16s,%.16s) by "
90432+#define GR_SOCK_NOINET_MSG "denied socket(%.16s,%.16s,%d) by "
90433+#define GR_BIND_MSG "denied bind() by "
90434+#define GR_CONNECT_MSG "denied connect() by "
90435+#define GR_BIND_ACL_MSG "denied bind() to %pI4 port %u sock type %.16s protocol %.16s by "
90436+#define GR_CONNECT_ACL_MSG "denied connect() to %pI4 port %u sock type %.16s protocol %.16s by "
90437+#define GR_IP_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%pI4\t%u\t%u\t%u\t%u\t%pI4"
90438+#define GR_EXEC_CHROOT_MSG "exec of %.980s within chroot by process "
90439+#define GR_CAP_ACL_MSG "use of %s denied for "
90440+#define GR_CAP_CHROOT_MSG "use of %s in chroot denied for "
90441+#define GR_CAP_ACL_MSG2 "use of %s permitted for "
90442+#define GR_USRCHANGE_ACL_MSG "change to uid %u denied for "
90443+#define GR_GRPCHANGE_ACL_MSG "change to gid %u denied for "
90444+#define GR_REMOUNT_AUDIT_MSG "remount of %.256s by "
90445+#define GR_UNMOUNT_AUDIT_MSG "unmount of %.256s by "
90446+#define GR_MOUNT_AUDIT_MSG "mount of %.256s to %.256s by "
90447+#define GR_CHDIR_AUDIT_MSG "chdir to %.980s by "
90448+#define GR_EXEC_AUDIT_MSG "exec of %.930s (%.128s) by "
90449+#define GR_RESOURCE_MSG "denied resource overstep by requesting %lu for %.16s against limit %lu for "
90450+#define GR_RWXMMAP_MSG "denied RWX mmap of %.950s by "
90451+#define GR_RWXMPROTECT_MSG "denied RWX mprotect of %.950s by "
90452+#define GR_TEXTREL_AUDIT_MSG "denied text relocation in %.950s, VMA:0x%08lx 0x%08lx by "
90453+#define GR_PTGNUSTACK_MSG "denied marking stack executable as requested by PT_GNU_STACK marking in %.950s by "
90454+#define GR_VM86_MSG "denied use of vm86 by "
90455+#define GR_PTRACE_AUDIT_MSG "process %.950s(%.16s:%d) attached to via ptrace by "
90456+#define GR_PTRACE_READEXEC_MSG "denied ptrace of unreadable binary %.950s by "
90457+#define GR_INIT_TRANSFER_MSG "persistent special role transferred privilege to init by "
90458+#define GR_BADPROCPID_MSG "denied read of sensitive /proc/pid/%s entry via fd passed across exec by "
90459+#define GR_SYMLINKOWNER_MSG "denied following symlink %.950s since symlink owner %u does not match target owner %u, by "
90460+#define GR_BRUTE_DAEMON_MSG "bruteforce prevention initiated for the next 30 minutes or until service restarted, stalling each fork 30 seconds. Please investigate the crash report for "
90461+#define GR_BRUTE_SUID_MSG "bruteforce prevention initiated due to crash of %.950s against uid %u, banning suid/sgid execs for %u minutes. Please investigate the crash report for "
90462+#define GR_IPC_DENIED_MSG "denied %s of overly-permissive IPC object with creator uid %u by "
90463+#define GR_MSRWRITE_MSG "denied write to CPU MSR by "
90464diff --git a/include/linux/grsecurity.h b/include/linux/grsecurity.h
90465new file mode 100644
90466index 0000000..63c1850
90467--- /dev/null
90468+++ b/include/linux/grsecurity.h
90469@@ -0,0 +1,250 @@
90470+#ifndef GR_SECURITY_H
90471+#define GR_SECURITY_H
90472+#include <linux/fs.h>
90473+#include <linux/fs_struct.h>
90474+#include <linux/binfmts.h>
90475+#include <linux/gracl.h>
90476+
90477+/* notify of brain-dead configs */
90478+#if defined(CONFIG_GRKERNSEC_PROC_USER) && defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
90479+#error "CONFIG_GRKERNSEC_PROC_USER and CONFIG_GRKERNSEC_PROC_USERGROUP cannot both be enabled."
90480+#endif
90481+#if defined(CONFIG_GRKERNSEC_PROC) && !defined(CONFIG_GRKERNSEC_PROC_USER) && !defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
90482+#error "CONFIG_GRKERNSEC_PROC enabled, but neither CONFIG_GRKERNSEC_PROC_USER nor CONFIG_GRKERNSEC_PROC_USERGROUP enabled"
90483+#endif
90484+#if defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_PAGEEXEC) && !defined(CONFIG_PAX_SEGMEXEC) && !defined(CONFIG_PAX_KERNEXEC)
90485+#error "CONFIG_PAX_NOEXEC enabled, but PAGEEXEC, SEGMEXEC, and KERNEXEC are disabled."
90486+#endif
90487+#if defined(CONFIG_PAX_ASLR) && !defined(CONFIG_PAX_RANDKSTACK) && !defined(CONFIG_PAX_RANDUSTACK) && !defined(CONFIG_PAX_RANDMMAP)
90488+#error "CONFIG_PAX_ASLR enabled, but RANDKSTACK, RANDUSTACK, and RANDMMAP are disabled."
90489+#endif
90490+#if defined(CONFIG_PAX) && !defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_ASLR)
90491+#error "CONFIG_PAX enabled, but no PaX options are enabled."
90492+#endif
90493+
90494+int gr_handle_new_usb(void);
90495+
90496+void gr_handle_brute_attach(int dumpable);
90497+void gr_handle_brute_check(void);
90498+void gr_handle_kernel_exploit(void);
90499+
90500+char gr_roletype_to_char(void);
90501+
90502+int gr_proc_is_restricted(void);
90503+
90504+int gr_acl_enable_at_secure(void);
90505+
90506+int gr_check_user_change(kuid_t real, kuid_t effective, kuid_t fs);
90507+int gr_check_group_change(kgid_t real, kgid_t effective, kgid_t fs);
90508+
90509+int gr_learn_cap(const struct task_struct *task, const struct cred *cred, const int cap);
90510+
90511+void gr_del_task_from_ip_table(struct task_struct *p);
90512+
90513+int gr_pid_is_chrooted(struct task_struct *p);
90514+int gr_handle_chroot_fowner(struct pid *pid, enum pid_type type);
90515+int gr_handle_chroot_nice(void);
90516+int gr_handle_chroot_sysctl(const int op);
90517+int gr_handle_chroot_setpriority(struct task_struct *p,
90518+ const int niceval);
90519+int gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt);
90520+int gr_chroot_fhandle(void);
90521+int gr_handle_chroot_chroot(const struct dentry *dentry,
90522+ const struct vfsmount *mnt);
90523+void gr_handle_chroot_chdir(const struct path *path);
90524+int gr_handle_chroot_chmod(const struct dentry *dentry,
90525+ const struct vfsmount *mnt, const int mode);
90526+int gr_handle_chroot_mknod(const struct dentry *dentry,
90527+ const struct vfsmount *mnt, const int mode);
90528+int gr_handle_chroot_mount(const struct dentry *dentry,
90529+ const struct vfsmount *mnt,
90530+ const char *dev_name);
90531+int gr_handle_chroot_pivot(void);
90532+int gr_handle_chroot_unix(const pid_t pid);
90533+
90534+int gr_handle_rawio(const struct inode *inode);
90535+
90536+void gr_handle_ioperm(void);
90537+void gr_handle_iopl(void);
90538+void gr_handle_msr_write(void);
90539+
90540+umode_t gr_acl_umask(void);
90541+
90542+int gr_tpe_allow(const struct file *file);
90543+
90544+void gr_set_chroot_entries(struct task_struct *task, const struct path *path);
90545+void gr_clear_chroot_entries(struct task_struct *task);
90546+
90547+void gr_log_forkfail(const int retval);
90548+void gr_log_timechange(void);
90549+void gr_log_signal(const int sig, const void *addr, const struct task_struct *t);
90550+void gr_log_chdir(const struct dentry *dentry,
90551+ const struct vfsmount *mnt);
90552+void gr_log_chroot_exec(const struct dentry *dentry,
90553+ const struct vfsmount *mnt);
90554+void gr_log_remount(const char *devname, const int retval);
90555+void gr_log_unmount(const char *devname, const int retval);
90556+void gr_log_mount(const char *from, struct path *to, const int retval);
90557+void gr_log_textrel(struct vm_area_struct *vma);
90558+void gr_log_ptgnustack(struct file *file);
90559+void gr_log_rwxmmap(struct file *file);
90560+void gr_log_rwxmprotect(struct vm_area_struct *vma);
90561+
90562+int gr_handle_follow_link(const struct inode *parent,
90563+ const struct inode *inode,
90564+ const struct dentry *dentry,
90565+ const struct vfsmount *mnt);
90566+int gr_handle_fifo(const struct dentry *dentry,
90567+ const struct vfsmount *mnt,
90568+ const struct dentry *dir, const int flag,
90569+ const int acc_mode);
90570+int gr_handle_hardlink(const struct dentry *dentry,
90571+ const struct vfsmount *mnt,
90572+ struct inode *inode,
90573+ const int mode, const struct filename *to);
90574+
90575+int gr_is_capable(const int cap);
90576+int gr_is_capable_nolog(const int cap);
90577+int gr_task_is_capable(const struct task_struct *task, const struct cred *cred, const int cap);
90578+int gr_task_is_capable_nolog(const struct task_struct *task, const int cap);
90579+
90580+void gr_copy_label(struct task_struct *tsk);
90581+void gr_handle_crash(struct task_struct *task, const int sig);
90582+int gr_handle_signal(const struct task_struct *p, const int sig);
90583+int gr_check_crash_uid(const kuid_t uid);
90584+int gr_check_protected_task(const struct task_struct *task);
90585+int gr_check_protected_task_fowner(struct pid *pid, enum pid_type type);
90586+int gr_acl_handle_mmap(const struct file *file,
90587+ const unsigned long prot);
90588+int gr_acl_handle_mprotect(const struct file *file,
90589+ const unsigned long prot);
90590+int gr_check_hidden_task(const struct task_struct *tsk);
90591+__u32 gr_acl_handle_truncate(const struct dentry *dentry,
90592+ const struct vfsmount *mnt);
90593+__u32 gr_acl_handle_utime(const struct dentry *dentry,
90594+ const struct vfsmount *mnt);
90595+__u32 gr_acl_handle_access(const struct dentry *dentry,
90596+ const struct vfsmount *mnt, const int fmode);
90597+__u32 gr_acl_handle_chmod(const struct dentry *dentry,
90598+ const struct vfsmount *mnt, umode_t *mode);
90599+__u32 gr_acl_handle_chown(const struct dentry *dentry,
90600+ const struct vfsmount *mnt);
90601+__u32 gr_acl_handle_setxattr(const struct dentry *dentry,
90602+ const struct vfsmount *mnt);
90603+__u32 gr_acl_handle_removexattr(const struct dentry *dentry,
90604+ const struct vfsmount *mnt);
90605+int gr_handle_ptrace(struct task_struct *task, const long request);
90606+int gr_handle_proc_ptrace(struct task_struct *task);
90607+__u32 gr_acl_handle_execve(const struct dentry *dentry,
90608+ const struct vfsmount *mnt);
90609+int gr_check_crash_exec(const struct file *filp);
90610+int gr_acl_is_enabled(void);
90611+void gr_set_role_label(struct task_struct *task, const kuid_t uid,
90612+ const kgid_t gid);
90613+int gr_set_proc_label(const struct dentry *dentry,
90614+ const struct vfsmount *mnt,
90615+ const int unsafe_flags);
90616+__u32 gr_acl_handle_hidden_file(const struct dentry *dentry,
90617+ const struct vfsmount *mnt);
90618+__u32 gr_acl_handle_open(const struct dentry *dentry,
90619+ const struct vfsmount *mnt, int acc_mode);
90620+__u32 gr_acl_handle_creat(const struct dentry *dentry,
90621+ const struct dentry *p_dentry,
90622+ const struct vfsmount *p_mnt,
90623+ int open_flags, int acc_mode, const int imode);
90624+void gr_handle_create(const struct dentry *dentry,
90625+ const struct vfsmount *mnt);
90626+void gr_handle_proc_create(const struct dentry *dentry,
90627+ const struct inode *inode);
90628+__u32 gr_acl_handle_mknod(const struct dentry *new_dentry,
90629+ const struct dentry *parent_dentry,
90630+ const struct vfsmount *parent_mnt,
90631+ const int mode);
90632+__u32 gr_acl_handle_mkdir(const struct dentry *new_dentry,
90633+ const struct dentry *parent_dentry,
90634+ const struct vfsmount *parent_mnt);
90635+__u32 gr_acl_handle_rmdir(const struct dentry *dentry,
90636+ const struct vfsmount *mnt);
90637+void gr_handle_delete(const u64 ino, const dev_t dev);
90638+__u32 gr_acl_handle_unlink(const struct dentry *dentry,
90639+ const struct vfsmount *mnt);
90640+__u32 gr_acl_handle_symlink(const struct dentry *new_dentry,
90641+ const struct dentry *parent_dentry,
90642+ const struct vfsmount *parent_mnt,
90643+ const struct filename *from);
90644+__u32 gr_acl_handle_link(const struct dentry *new_dentry,
90645+ const struct dentry *parent_dentry,
90646+ const struct vfsmount *parent_mnt,
90647+ const struct dentry *old_dentry,
90648+ const struct vfsmount *old_mnt, const struct filename *to);
90649+int gr_handle_symlink_owner(const struct path *link, const struct inode *target);
90650+int gr_acl_handle_rename(struct dentry *new_dentry,
90651+ struct dentry *parent_dentry,
90652+ const struct vfsmount *parent_mnt,
90653+ struct dentry *old_dentry,
90654+ struct inode *old_parent_inode,
90655+ struct vfsmount *old_mnt, const struct filename *newname, unsigned int flags);
90656+void gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
90657+ struct dentry *old_dentry,
90658+ struct dentry *new_dentry,
90659+ struct vfsmount *mnt, const __u8 replace, unsigned int flags);
90660+__u32 gr_check_link(const struct dentry *new_dentry,
90661+ const struct dentry *parent_dentry,
90662+ const struct vfsmount *parent_mnt,
90663+ const struct dentry *old_dentry,
90664+ const struct vfsmount *old_mnt);
90665+int gr_acl_handle_filldir(const struct file *file, const char *name,
90666+ const unsigned int namelen, const u64 ino);
90667+
90668+__u32 gr_acl_handle_unix(const struct dentry *dentry,
90669+ const struct vfsmount *mnt);
90670+void gr_acl_handle_exit(void);
90671+void gr_acl_handle_psacct(struct task_struct *task, const long code);
90672+int gr_acl_handle_procpidmem(const struct task_struct *task);
90673+int gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags);
90674+int gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode);
90675+void gr_audit_ptrace(struct task_struct *task);
90676+dev_t gr_get_dev_from_dentry(struct dentry *dentry);
90677+u64 gr_get_ino_from_dentry(struct dentry *dentry);
90678+void gr_put_exec_file(struct task_struct *task);
90679+
90680+int gr_ptrace_readexec(struct file *file, int unsafe_flags);
90681+
90682+void gr_inc_chroot_refcnts(struct dentry *dentry, struct vfsmount *mnt);
90683+void gr_dec_chroot_refcnts(struct dentry *dentry, struct vfsmount *mnt);
90684+int gr_bad_chroot_rename(struct dentry *olddentry, struct vfsmount *oldmnt,
90685+ struct dentry *newdentry, struct vfsmount *newmnt);
90686+
90687+#ifdef CONFIG_GRKERNSEC_RESLOG
90688+extern void gr_log_resource(const struct task_struct *task, const int res,
90689+ const unsigned long wanted, const int gt);
90690+#else
90691+static inline void gr_log_resource(const struct task_struct *task, const int res,
90692+ const unsigned long wanted, const int gt)
90693+{
90694+}
90695+#endif
90696+
90697+#ifdef CONFIG_GRKERNSEC
90698+void task_grsec_rbac(struct seq_file *m, struct task_struct *p);
90699+void gr_handle_vm86(void);
90700+void gr_handle_mem_readwrite(u64 from, u64 to);
90701+
90702+void gr_log_badprocpid(const char *entry);
90703+
90704+extern int grsec_enable_dmesg;
90705+extern int grsec_disable_privio;
90706+
90707+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
90708+extern kgid_t grsec_proc_gid;
90709+#endif
90710+
90711+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
90712+extern int grsec_enable_chroot_findtask;
90713+#endif
90714+#ifdef CONFIG_GRKERNSEC_SETXID
90715+extern int grsec_enable_setxid;
90716+#endif
90717+#endif
90718+
90719+#endif
90720diff --git a/include/linux/grsock.h b/include/linux/grsock.h
90721new file mode 100644
90722index 0000000..e7ffaaf
90723--- /dev/null
90724+++ b/include/linux/grsock.h
90725@@ -0,0 +1,19 @@
90726+#ifndef __GRSOCK_H
90727+#define __GRSOCK_H
90728+
90729+extern void gr_attach_curr_ip(const struct sock *sk);
90730+extern int gr_handle_sock_all(const int family, const int type,
90731+ const int protocol);
90732+extern int gr_handle_sock_server(const struct sockaddr *sck);
90733+extern int gr_handle_sock_server_other(const struct sock *sck);
90734+extern int gr_handle_sock_client(const struct sockaddr *sck);
90735+extern int gr_search_connect(struct socket * sock,
90736+ struct sockaddr_in * addr);
90737+extern int gr_search_bind(struct socket * sock,
90738+ struct sockaddr_in * addr);
90739+extern int gr_search_listen(struct socket * sock);
90740+extern int gr_search_accept(struct socket * sock);
90741+extern int gr_search_socket(const int domain, const int type,
90742+ const int protocol);
90743+
90744+#endif
90745diff --git a/include/linux/highmem.h b/include/linux/highmem.h
90746index 9286a46..373f27f 100644
90747--- a/include/linux/highmem.h
90748+++ b/include/linux/highmem.h
90749@@ -189,6 +189,18 @@ static inline void clear_highpage(struct page *page)
90750 kunmap_atomic(kaddr);
90751 }
90752
90753+static inline void sanitize_highpage(struct page *page)
90754+{
90755+ void *kaddr;
90756+ unsigned long flags;
90757+
90758+ local_irq_save(flags);
90759+ kaddr = kmap_atomic(page);
90760+ clear_page(kaddr);
90761+ kunmap_atomic(kaddr);
90762+ local_irq_restore(flags);
90763+}
90764+
90765 static inline void zero_user_segments(struct page *page,
90766 unsigned start1, unsigned end1,
90767 unsigned start2, unsigned end2)
90768diff --git a/include/linux/hwmon-sysfs.h b/include/linux/hwmon-sysfs.h
90769index 1c7b89a..7dda400 100644
90770--- a/include/linux/hwmon-sysfs.h
90771+++ b/include/linux/hwmon-sysfs.h
90772@@ -25,7 +25,8 @@
90773 struct sensor_device_attribute{
90774 struct device_attribute dev_attr;
90775 int index;
90776-};
90777+} __do_const;
90778+typedef struct sensor_device_attribute __no_const sensor_device_attribute_no_const;
90779 #define to_sensor_dev_attr(_dev_attr) \
90780 container_of(_dev_attr, struct sensor_device_attribute, dev_attr)
90781
90782@@ -41,7 +42,8 @@ struct sensor_device_attribute_2 {
90783 struct device_attribute dev_attr;
90784 u8 index;
90785 u8 nr;
90786-};
90787+} __do_const;
90788+typedef struct sensor_device_attribute_2 __no_const sensor_device_attribute_2_no_const;
90789 #define to_sensor_dev_attr_2(_dev_attr) \
90790 container_of(_dev_attr, struct sensor_device_attribute_2, dev_attr)
90791
90792diff --git a/include/linux/i2c.h b/include/linux/i2c.h
90793index f17da50..2f8b203 100644
90794--- a/include/linux/i2c.h
90795+++ b/include/linux/i2c.h
90796@@ -409,6 +409,7 @@ struct i2c_algorithm {
90797 int (*unreg_slave)(struct i2c_client *client);
90798 #endif
90799 };
90800+typedef struct i2c_algorithm __no_const i2c_algorithm_no_const;
90801
90802 /**
90803 * struct i2c_bus_recovery_info - I2C bus recovery information
90804diff --git a/include/linux/if_pppox.h b/include/linux/if_pppox.h
90805index aff7ad8..3942bbd 100644
90806--- a/include/linux/if_pppox.h
90807+++ b/include/linux/if_pppox.h
90808@@ -76,7 +76,7 @@ struct pppox_proto {
90809 int (*ioctl)(struct socket *sock, unsigned int cmd,
90810 unsigned long arg);
90811 struct module *owner;
90812-};
90813+} __do_const;
90814
90815 extern int register_pppox_proto(int proto_num, const struct pppox_proto *pp);
90816 extern void unregister_pppox_proto(int proto_num);
90817diff --git a/include/linux/init.h b/include/linux/init.h
90818index 2df8e8d..3e1280d 100644
90819--- a/include/linux/init.h
90820+++ b/include/linux/init.h
90821@@ -37,9 +37,17 @@
90822 * section.
90823 */
90824
90825+#define add_init_latent_entropy __latent_entropy
90826+
90827+#ifdef CONFIG_MEMORY_HOTPLUG
90828+#define add_meminit_latent_entropy
90829+#else
90830+#define add_meminit_latent_entropy __latent_entropy
90831+#endif
90832+
90833 /* These are for everybody (although not all archs will actually
90834 discard it in modules) */
90835-#define __init __section(.init.text) __cold notrace
90836+#define __init __section(.init.text) __cold notrace add_init_latent_entropy
90837 #define __initdata __section(.init.data)
90838 #define __initconst __constsection(.init.rodata)
90839 #define __exitdata __section(.exit.data)
90840@@ -100,7 +108,7 @@
90841 #define __cpuexitconst
90842
90843 /* Used for MEMORY_HOTPLUG */
90844-#define __meminit __section(.meminit.text) __cold notrace
90845+#define __meminit __section(.meminit.text) __cold notrace add_meminit_latent_entropy
90846 #define __meminitdata __section(.meminit.data)
90847 #define __meminitconst __constsection(.meminit.rodata)
90848 #define __memexit __section(.memexit.text) __exitused __cold notrace
90849diff --git a/include/linux/init_task.h b/include/linux/init_task.h
90850index 696d223..6d6b39f 100644
90851--- a/include/linux/init_task.h
90852+++ b/include/linux/init_task.h
90853@@ -158,6 +158,12 @@ extern struct task_group root_task_group;
90854
90855 #define INIT_TASK_COMM "swapper"
90856
90857+#ifdef CONFIG_X86
90858+#define INIT_TASK_THREAD_INFO .tinfo = INIT_THREAD_INFO,
90859+#else
90860+#define INIT_TASK_THREAD_INFO
90861+#endif
90862+
90863 #ifdef CONFIG_RT_MUTEXES
90864 # define INIT_RT_MUTEXES(tsk) \
90865 .pi_waiters = RB_ROOT, \
90866@@ -224,6 +230,7 @@ extern struct task_group root_task_group;
90867 RCU_POINTER_INITIALIZER(cred, &init_cred), \
90868 .comm = INIT_TASK_COMM, \
90869 .thread = INIT_THREAD, \
90870+ INIT_TASK_THREAD_INFO \
90871 .fs = &init_fs, \
90872 .files = &init_files, \
90873 .signal = &init_signals, \
90874diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
90875index 2e88580..f6a99a0 100644
90876--- a/include/linux/interrupt.h
90877+++ b/include/linux/interrupt.h
90878@@ -420,8 +420,8 @@ extern const char * const softirq_to_name[NR_SOFTIRQS];
90879
90880 struct softirq_action
90881 {
90882- void (*action)(struct softirq_action *);
90883-};
90884+ void (*action)(void);
90885+} __no_const;
90886
90887 asmlinkage void do_softirq(void);
90888 asmlinkage void __do_softirq(void);
90889@@ -435,7 +435,7 @@ static inline void do_softirq_own_stack(void)
90890 }
90891 #endif
90892
90893-extern void open_softirq(int nr, void (*action)(struct softirq_action *));
90894+extern void open_softirq(int nr, void (*action)(void));
90895 extern void softirq_init(void);
90896 extern void __raise_softirq_irqoff(unsigned int nr);
90897
90898diff --git a/include/linux/iommu.h b/include/linux/iommu.h
90899index 38daa45..4de4317 100644
90900--- a/include/linux/iommu.h
90901+++ b/include/linux/iommu.h
90902@@ -147,7 +147,7 @@ struct iommu_ops {
90903
90904 unsigned long pgsize_bitmap;
90905 void *priv;
90906-};
90907+} __do_const;
90908
90909 #define IOMMU_GROUP_NOTIFY_ADD_DEVICE 1 /* Device added */
90910 #define IOMMU_GROUP_NOTIFY_DEL_DEVICE 2 /* Pre Device removed */
90911diff --git a/include/linux/ioport.h b/include/linux/ioport.h
90912index 2c525022..345b106 100644
90913--- a/include/linux/ioport.h
90914+++ b/include/linux/ioport.h
90915@@ -161,7 +161,7 @@ struct resource *lookup_resource(struct resource *root, resource_size_t start);
90916 int adjust_resource(struct resource *res, resource_size_t start,
90917 resource_size_t size);
90918 resource_size_t resource_alignment(struct resource *res);
90919-static inline resource_size_t resource_size(const struct resource *res)
90920+static inline resource_size_t __intentional_overflow(-1) resource_size(const struct resource *res)
90921 {
90922 return res->end - res->start + 1;
90923 }
90924diff --git a/include/linux/ipc_namespace.h b/include/linux/ipc_namespace.h
90925index 1eee6bc..9cf4912 100644
90926--- a/include/linux/ipc_namespace.h
90927+++ b/include/linux/ipc_namespace.h
90928@@ -60,7 +60,7 @@ struct ipc_namespace {
90929 struct user_namespace *user_ns;
90930
90931 struct ns_common ns;
90932-};
90933+} __randomize_layout;
90934
90935 extern struct ipc_namespace init_ipc_ns;
90936 extern atomic_t nr_ipc_ns;
90937diff --git a/include/linux/irq.h b/include/linux/irq.h
90938index d09ec7a..f373eb5 100644
90939--- a/include/linux/irq.h
90940+++ b/include/linux/irq.h
90941@@ -364,7 +364,8 @@ struct irq_chip {
90942 void (*irq_write_msi_msg)(struct irq_data *data, struct msi_msg *msg);
90943
90944 unsigned long flags;
90945-};
90946+} __do_const;
90947+typedef struct irq_chip __no_const irq_chip_no_const;
90948
90949 /*
90950 * irq_chip specific flags
90951diff --git a/include/linux/irqchip/arm-gic.h b/include/linux/irqchip/arm-gic.h
90952index 71d706d..817cdec 100644
90953--- a/include/linux/irqchip/arm-gic.h
90954+++ b/include/linux/irqchip/arm-gic.h
90955@@ -95,7 +95,7 @@
90956
90957 struct device_node;
90958
90959-extern struct irq_chip gic_arch_extn;
90960+extern irq_chip_no_const gic_arch_extn;
90961
90962 void gic_init_bases(unsigned int, int, void __iomem *, void __iomem *,
90963 u32 offset, struct device_node *);
90964diff --git a/include/linux/irqdesc.h b/include/linux/irqdesc.h
90965index dd1109f..4f4fdda 100644
90966--- a/include/linux/irqdesc.h
90967+++ b/include/linux/irqdesc.h
90968@@ -61,7 +61,7 @@ struct irq_desc {
90969 unsigned int irq_count; /* For detecting broken IRQs */
90970 unsigned long last_unhandled; /* Aging timer for unhandled count */
90971 unsigned int irqs_unhandled;
90972- atomic_t threads_handled;
90973+ atomic_unchecked_t threads_handled;
90974 int threads_handled_last;
90975 raw_spinlock_t lock;
90976 struct cpumask *percpu_enabled;
90977diff --git a/include/linux/irqdomain.h b/include/linux/irqdomain.h
90978index 676d730..5e05daec 100644
90979--- a/include/linux/irqdomain.h
90980+++ b/include/linux/irqdomain.h
90981@@ -40,6 +40,7 @@ struct device_node;
90982 struct irq_domain;
90983 struct of_device_id;
90984 struct irq_chip;
90985+struct irq_chip_no_const;
90986 struct irq_data;
90987
90988 /* Number of irqs reserved for a legacy isa controller */
90989diff --git a/include/linux/jiffies.h b/include/linux/jiffies.h
90990index c367cbd..c9b79e6 100644
90991--- a/include/linux/jiffies.h
90992+++ b/include/linux/jiffies.h
90993@@ -280,20 +280,20 @@ extern unsigned long preset_lpj;
90994 /*
90995 * Convert various time units to each other:
90996 */
90997-extern unsigned int jiffies_to_msecs(const unsigned long j);
90998-extern unsigned int jiffies_to_usecs(const unsigned long j);
90999+extern unsigned int jiffies_to_msecs(const unsigned long j) __intentional_overflow(-1);
91000+extern unsigned int jiffies_to_usecs(const unsigned long j) __intentional_overflow(-1);
91001
91002-static inline u64 jiffies_to_nsecs(const unsigned long j)
91003+static inline u64 __intentional_overflow(-1) jiffies_to_nsecs(const unsigned long j)
91004 {
91005 return (u64)jiffies_to_usecs(j) * NSEC_PER_USEC;
91006 }
91007
91008-extern unsigned long msecs_to_jiffies(const unsigned int m);
91009-extern unsigned long usecs_to_jiffies(const unsigned int u);
91010+extern unsigned long msecs_to_jiffies(const unsigned int m) __intentional_overflow(-1);
91011+extern unsigned long usecs_to_jiffies(const unsigned int u) __intentional_overflow(-1);
91012 extern unsigned long timespec_to_jiffies(const struct timespec *value);
91013 extern void jiffies_to_timespec(const unsigned long jiffies,
91014- struct timespec *value);
91015-extern unsigned long timeval_to_jiffies(const struct timeval *value);
91016+ struct timespec *value) __intentional_overflow(-1);
91017+extern unsigned long timeval_to_jiffies(const struct timeval *value) __intentional_overflow(-1);
91018 extern void jiffies_to_timeval(const unsigned long jiffies,
91019 struct timeval *value);
91020
91021diff --git a/include/linux/kallsyms.h b/include/linux/kallsyms.h
91022index 6883e19..e854fcb 100644
91023--- a/include/linux/kallsyms.h
91024+++ b/include/linux/kallsyms.h
91025@@ -15,7 +15,8 @@
91026
91027 struct module;
91028
91029-#ifdef CONFIG_KALLSYMS
91030+#if !defined(__INCLUDED_BY_HIDESYM) || !defined(CONFIG_KALLSYMS)
91031+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
91032 /* Lookup the address for a symbol. Returns 0 if not found. */
91033 unsigned long kallsyms_lookup_name(const char *name);
91034
91035@@ -106,6 +107,21 @@ static inline int lookup_symbol_attrs(unsigned long addr, unsigned long *size, u
91036 /* Stupid that this does nothing, but I didn't create this mess. */
91037 #define __print_symbol(fmt, addr)
91038 #endif /*CONFIG_KALLSYMS*/
91039+#else /* when included by kallsyms.c, vsnprintf.c, kprobes.c, or
91040+ arch/x86/kernel/dumpstack.c, with HIDESYM enabled */
91041+extern unsigned long kallsyms_lookup_name(const char *name);
91042+extern void __print_symbol(const char *fmt, unsigned long address);
91043+extern int sprint_backtrace(char *buffer, unsigned long address);
91044+extern int sprint_symbol(char *buffer, unsigned long address);
91045+extern int sprint_symbol_no_offset(char *buffer, unsigned long address);
91046+const char *kallsyms_lookup(unsigned long addr,
91047+ unsigned long *symbolsize,
91048+ unsigned long *offset,
91049+ char **modname, char *namebuf);
91050+extern int kallsyms_lookup_size_offset(unsigned long addr,
91051+ unsigned long *symbolsize,
91052+ unsigned long *offset);
91053+#endif
91054
91055 /* This macro allows us to keep printk typechecking */
91056 static __printf(1, 2)
91057diff --git a/include/linux/kernel.h b/include/linux/kernel.h
91058index d6d630d..feea1f5 100644
91059--- a/include/linux/kernel.h
91060+++ b/include/linux/kernel.h
91061@@ -378,7 +378,7 @@ static inline int __must_check kstrtos32_from_user(const char __user *s, size_t
91062 /* Obsolete, do not use. Use kstrto<foo> instead */
91063
91064 extern unsigned long simple_strtoul(const char *,char **,unsigned int);
91065-extern long simple_strtol(const char *,char **,unsigned int);
91066+extern long simple_strtol(const char *,char **,unsigned int) __intentional_overflow(-1);
91067 extern unsigned long long simple_strtoull(const char *,char **,unsigned int);
91068 extern long long simple_strtoll(const char *,char **,unsigned int);
91069
91070diff --git a/include/linux/key-type.h b/include/linux/key-type.h
91071index ff9f1d3..6712be5 100644
91072--- a/include/linux/key-type.h
91073+++ b/include/linux/key-type.h
91074@@ -152,7 +152,7 @@ struct key_type {
91075 /* internal fields */
91076 struct list_head link; /* link in types list */
91077 struct lock_class_key lock_class; /* key->sem lock class */
91078-};
91079+} __do_const;
91080
91081 extern struct key_type key_type_keyring;
91082
91083diff --git a/include/linux/kgdb.h b/include/linux/kgdb.h
91084index e465bb1..19f605fd 100644
91085--- a/include/linux/kgdb.h
91086+++ b/include/linux/kgdb.h
91087@@ -52,7 +52,7 @@ extern int kgdb_connected;
91088 extern int kgdb_io_module_registered;
91089
91090 extern atomic_t kgdb_setting_breakpoint;
91091-extern atomic_t kgdb_cpu_doing_single_step;
91092+extern atomic_unchecked_t kgdb_cpu_doing_single_step;
91093
91094 extern struct task_struct *kgdb_usethread;
91095 extern struct task_struct *kgdb_contthread;
91096@@ -254,7 +254,7 @@ struct kgdb_arch {
91097 void (*correct_hw_break)(void);
91098
91099 void (*enable_nmi)(bool on);
91100-};
91101+} __do_const;
91102
91103 /**
91104 * struct kgdb_io - Describe the interface for an I/O driver to talk with KGDB.
91105@@ -279,7 +279,7 @@ struct kgdb_io {
91106 void (*pre_exception) (void);
91107 void (*post_exception) (void);
91108 int is_console;
91109-};
91110+} __do_const;
91111
91112 extern struct kgdb_arch arch_kgdb_ops;
91113
91114diff --git a/include/linux/kmemleak.h b/include/linux/kmemleak.h
91115index e705467..a92471d 100644
91116--- a/include/linux/kmemleak.h
91117+++ b/include/linux/kmemleak.h
91118@@ -27,7 +27,7 @@
91119
91120 extern void kmemleak_init(void) __ref;
91121 extern void kmemleak_alloc(const void *ptr, size_t size, int min_count,
91122- gfp_t gfp) __ref;
91123+ gfp_t gfp) __ref __size_overflow(2);
91124 extern void kmemleak_alloc_percpu(const void __percpu *ptr, size_t size) __ref;
91125 extern void kmemleak_free(const void *ptr) __ref;
91126 extern void kmemleak_free_part(const void *ptr, size_t size) __ref;
91127@@ -62,7 +62,7 @@ static inline void kmemleak_erase(void **ptr)
91128 static inline void kmemleak_init(void)
91129 {
91130 }
91131-static inline void kmemleak_alloc(const void *ptr, size_t size, int min_count,
91132+static inline void __size_overflow(2) kmemleak_alloc(const void *ptr, size_t size, int min_count,
91133 gfp_t gfp)
91134 {
91135 }
91136diff --git a/include/linux/kmod.h b/include/linux/kmod.h
91137index 0555cc6..40116ce 100644
91138--- a/include/linux/kmod.h
91139+++ b/include/linux/kmod.h
91140@@ -34,6 +34,8 @@ extern char modprobe_path[]; /* for sysctl */
91141 * usually useless though. */
91142 extern __printf(2, 3)
91143 int __request_module(bool wait, const char *name, ...);
91144+extern __printf(3, 4)
91145+int ___request_module(bool wait, char *param_name, const char *name, ...);
91146 #define request_module(mod...) __request_module(true, mod)
91147 #define request_module_nowait(mod...) __request_module(false, mod)
91148 #define try_then_request_module(x, mod...) \
91149@@ -57,6 +59,9 @@ struct subprocess_info {
91150 struct work_struct work;
91151 struct completion *complete;
91152 char *path;
91153+#ifdef CONFIG_GRKERNSEC
91154+ char *origpath;
91155+#endif
91156 char **argv;
91157 char **envp;
91158 int wait;
91159diff --git a/include/linux/kobject.h b/include/linux/kobject.h
91160index 2d61b90..a1d0a13 100644
91161--- a/include/linux/kobject.h
91162+++ b/include/linux/kobject.h
91163@@ -118,7 +118,7 @@ struct kobj_type {
91164 struct attribute **default_attrs;
91165 const struct kobj_ns_type_operations *(*child_ns_type)(struct kobject *kobj);
91166 const void *(*namespace)(struct kobject *kobj);
91167-};
91168+} __do_const;
91169
91170 struct kobj_uevent_env {
91171 char *argv[3];
91172@@ -142,6 +142,7 @@ struct kobj_attribute {
91173 ssize_t (*store)(struct kobject *kobj, struct kobj_attribute *attr,
91174 const char *buf, size_t count);
91175 };
91176+typedef struct kobj_attribute __no_const kobj_attribute_no_const;
91177
91178 extern const struct sysfs_ops kobj_sysfs_ops;
91179
91180@@ -169,7 +170,7 @@ struct kset {
91181 spinlock_t list_lock;
91182 struct kobject kobj;
91183 const struct kset_uevent_ops *uevent_ops;
91184-};
91185+} __randomize_layout;
91186
91187 extern void kset_init(struct kset *kset);
91188 extern int __must_check kset_register(struct kset *kset);
91189diff --git a/include/linux/kobject_ns.h b/include/linux/kobject_ns.h
91190index df32d25..fb52e27 100644
91191--- a/include/linux/kobject_ns.h
91192+++ b/include/linux/kobject_ns.h
91193@@ -44,7 +44,7 @@ struct kobj_ns_type_operations {
91194 const void *(*netlink_ns)(struct sock *sk);
91195 const void *(*initial_ns)(void);
91196 void (*drop_ns)(void *);
91197-};
91198+} __do_const;
91199
91200 int kobj_ns_type_register(const struct kobj_ns_type_operations *ops);
91201 int kobj_ns_type_registered(enum kobj_ns_type type);
91202diff --git a/include/linux/kref.h b/include/linux/kref.h
91203index 484604d..0f6c5b6 100644
91204--- a/include/linux/kref.h
91205+++ b/include/linux/kref.h
91206@@ -68,7 +68,7 @@ static inline void kref_get(struct kref *kref)
91207 static inline int kref_sub(struct kref *kref, unsigned int count,
91208 void (*release)(struct kref *kref))
91209 {
91210- WARN_ON(release == NULL);
91211+ BUG_ON(release == NULL);
91212
91213 if (atomic_sub_and_test((int) count, &kref->refcount)) {
91214 release(kref);
91215diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
91216index d12b210..d91fd76 100644
91217--- a/include/linux/kvm_host.h
91218+++ b/include/linux/kvm_host.h
91219@@ -455,7 +455,7 @@ static inline void kvm_irqfd_exit(void)
91220 {
91221 }
91222 #endif
91223-int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
91224+int kvm_init(const void *opaque, unsigned vcpu_size, unsigned vcpu_align,
91225 struct module *module);
91226 void kvm_exit(void);
91227
91228@@ -633,7 +633,7 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
91229 struct kvm_guest_debug *dbg);
91230 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run);
91231
91232-int kvm_arch_init(void *opaque);
91233+int kvm_arch_init(const void *opaque);
91234 void kvm_arch_exit(void);
91235
91236 int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu);
91237diff --git a/include/linux/libata.h b/include/linux/libata.h
91238index 6b08cc1..248c5e9 100644
91239--- a/include/linux/libata.h
91240+++ b/include/linux/libata.h
91241@@ -980,7 +980,7 @@ struct ata_port_operations {
91242 * fields must be pointers.
91243 */
91244 const struct ata_port_operations *inherits;
91245-};
91246+} __do_const;
91247
91248 struct ata_port_info {
91249 unsigned long flags;
91250diff --git a/include/linux/linkage.h b/include/linux/linkage.h
91251index a6a42dd..6c5ebce 100644
91252--- a/include/linux/linkage.h
91253+++ b/include/linux/linkage.h
91254@@ -36,6 +36,7 @@
91255 #endif
91256
91257 #define __page_aligned_data __section(.data..page_aligned) __aligned(PAGE_SIZE)
91258+#define __page_aligned_rodata __read_only __aligned(PAGE_SIZE)
91259 #define __page_aligned_bss __section(.bss..page_aligned) __aligned(PAGE_SIZE)
91260
91261 /*
91262diff --git a/include/linux/list.h b/include/linux/list.h
91263index feb773c..98f3075 100644
91264--- a/include/linux/list.h
91265+++ b/include/linux/list.h
91266@@ -113,6 +113,19 @@ extern void __list_del_entry(struct list_head *entry);
91267 extern void list_del(struct list_head *entry);
91268 #endif
91269
91270+extern void __pax_list_add(struct list_head *new,
91271+ struct list_head *prev,
91272+ struct list_head *next);
91273+static inline void pax_list_add(struct list_head *new, struct list_head *head)
91274+{
91275+ __pax_list_add(new, head, head->next);
91276+}
91277+static inline void pax_list_add_tail(struct list_head *new, struct list_head *head)
91278+{
91279+ __pax_list_add(new, head->prev, head);
91280+}
91281+extern void pax_list_del(struct list_head *entry);
91282+
91283 /**
91284 * list_replace - replace old entry by new one
91285 * @old : the element to be replaced
91286@@ -146,6 +159,8 @@ static inline void list_del_init(struct list_head *entry)
91287 INIT_LIST_HEAD(entry);
91288 }
91289
91290+extern void pax_list_del_init(struct list_head *entry);
91291+
91292 /**
91293 * list_move - delete from one list and add as another's head
91294 * @list: the entry to move
91295diff --git a/include/linux/lockref.h b/include/linux/lockref.h
91296index b10b122..d37b3de 100644
91297--- a/include/linux/lockref.h
91298+++ b/include/linux/lockref.h
91299@@ -28,7 +28,7 @@ struct lockref {
91300 #endif
91301 struct {
91302 spinlock_t lock;
91303- int count;
91304+ atomic_t count;
91305 };
91306 };
91307 };
91308@@ -43,9 +43,29 @@ extern void lockref_mark_dead(struct lockref *);
91309 extern int lockref_get_not_dead(struct lockref *);
91310
91311 /* Must be called under spinlock for reliable results */
91312-static inline int __lockref_is_dead(const struct lockref *l)
91313+static inline int __lockref_is_dead(const struct lockref *lockref)
91314 {
91315- return ((int)l->count < 0);
91316+ return atomic_read(&lockref->count) < 0;
91317+}
91318+
91319+static inline int __lockref_read(const struct lockref *lockref)
91320+{
91321+ return atomic_read(&lockref->count);
91322+}
91323+
91324+static inline void __lockref_set(struct lockref *lockref, int count)
91325+{
91326+ atomic_set(&lockref->count, count);
91327+}
91328+
91329+static inline void __lockref_inc(struct lockref *lockref)
91330+{
91331+ atomic_inc(&lockref->count);
91332+}
91333+
91334+static inline void __lockref_dec(struct lockref *lockref)
91335+{
91336+ atomic_dec(&lockref->count);
91337 }
91338
91339 #endif /* __LINUX_LOCKREF_H */
91340diff --git a/include/linux/math64.h b/include/linux/math64.h
91341index c45c089..298841c 100644
91342--- a/include/linux/math64.h
91343+++ b/include/linux/math64.h
91344@@ -15,7 +15,7 @@
91345 * This is commonly provided by 32bit archs to provide an optimized 64bit
91346 * divide.
91347 */
91348-static inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
91349+static inline u64 __intentional_overflow(-1) div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
91350 {
91351 *remainder = dividend % divisor;
91352 return dividend / divisor;
91353@@ -42,7 +42,7 @@ static inline u64 div64_u64_rem(u64 dividend, u64 divisor, u64 *remainder)
91354 /**
91355 * div64_u64 - unsigned 64bit divide with 64bit divisor
91356 */
91357-static inline u64 div64_u64(u64 dividend, u64 divisor)
91358+static inline u64 __intentional_overflow(-1) div64_u64(u64 dividend, u64 divisor)
91359 {
91360 return dividend / divisor;
91361 }
91362@@ -61,7 +61,7 @@ static inline s64 div64_s64(s64 dividend, s64 divisor)
91363 #define div64_ul(x, y) div_u64((x), (y))
91364
91365 #ifndef div_u64_rem
91366-static inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
91367+static inline u64 __intentional_overflow(-1) div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
91368 {
91369 *remainder = do_div(dividend, divisor);
91370 return dividend;
91371@@ -77,7 +77,7 @@ extern u64 div64_u64_rem(u64 dividend, u64 divisor, u64 *remainder);
91372 #endif
91373
91374 #ifndef div64_u64
91375-extern u64 div64_u64(u64 dividend, u64 divisor);
91376+extern u64 __intentional_overflow(-1) div64_u64(u64 dividend, u64 divisor);
91377 #endif
91378
91379 #ifndef div64_s64
91380@@ -94,7 +94,7 @@ extern s64 div64_s64(s64 dividend, s64 divisor);
91381 * divide.
91382 */
91383 #ifndef div_u64
91384-static inline u64 div_u64(u64 dividend, u32 divisor)
91385+static inline u64 __intentional_overflow(-1) div_u64(u64 dividend, u32 divisor)
91386 {
91387 u32 remainder;
91388 return div_u64_rem(dividend, divisor, &remainder);
91389diff --git a/include/linux/mempolicy.h b/include/linux/mempolicy.h
91390index 3d385c8..deacb6a 100644
91391--- a/include/linux/mempolicy.h
91392+++ b/include/linux/mempolicy.h
91393@@ -91,6 +91,10 @@ static inline struct mempolicy *mpol_dup(struct mempolicy *pol)
91394 }
91395
91396 #define vma_policy(vma) ((vma)->vm_policy)
91397+static inline void set_vma_policy(struct vm_area_struct *vma, struct mempolicy *pol)
91398+{
91399+ vma->vm_policy = pol;
91400+}
91401
91402 static inline void mpol_get(struct mempolicy *pol)
91403 {
91404@@ -229,6 +233,9 @@ static inline void mpol_free_shared_policy(struct shared_policy *p)
91405 }
91406
91407 #define vma_policy(vma) NULL
91408+static inline void set_vma_policy(struct vm_area_struct *vma, struct mempolicy *pol)
91409+{
91410+}
91411
91412 static inline int
91413 vma_dup_policy(struct vm_area_struct *src, struct vm_area_struct *dst)
91414diff --git a/include/linux/mm.h b/include/linux/mm.h
91415index 47a9392..ef645bc 100644
91416--- a/include/linux/mm.h
91417+++ b/include/linux/mm.h
91418@@ -135,6 +135,11 @@ extern unsigned int kobjsize(const void *objp);
91419
91420 #define VM_DONTCOPY 0x00020000 /* Do not copy this vma on fork */
91421 #define VM_DONTEXPAND 0x00040000 /* Cannot expand with mremap() */
91422+
91423+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
91424+#define VM_PAGEEXEC 0x00080000 /* vma->vm_page_prot needs special handling */
91425+#endif
91426+
91427 #define VM_ACCOUNT 0x00100000 /* Is a VM accounted object */
91428 #define VM_NORESERVE 0x00200000 /* should the VM suppress accounting */
91429 #define VM_HUGETLB 0x00400000 /* Huge TLB Page VM */
91430@@ -254,8 +259,8 @@ struct vm_operations_struct {
91431 /* called by access_process_vm when get_user_pages() fails, typically
91432 * for use by special VMAs that can switch between memory and hardware
91433 */
91434- int (*access)(struct vm_area_struct *vma, unsigned long addr,
91435- void *buf, int len, int write);
91436+ ssize_t (*access)(struct vm_area_struct *vma, unsigned long addr,
91437+ void *buf, size_t len, int write);
91438
91439 /* Called by the /proc/PID/maps code to ask the vma whether it
91440 * has a special name. Returning non-NULL will also cause this
91441@@ -293,6 +298,7 @@ struct vm_operations_struct {
91442 struct page *(*find_special_page)(struct vm_area_struct *vma,
91443 unsigned long addr);
91444 };
91445+typedef struct vm_operations_struct __no_const vm_operations_struct_no_const;
91446
91447 struct mmu_gather;
91448 struct inode;
91449@@ -1213,8 +1219,8 @@ int follow_pfn(struct vm_area_struct *vma, unsigned long address,
91450 unsigned long *pfn);
91451 int follow_phys(struct vm_area_struct *vma, unsigned long address,
91452 unsigned int flags, unsigned long *prot, resource_size_t *phys);
91453-int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
91454- void *buf, int len, int write);
91455+ssize_t generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
91456+ void *buf, size_t len, int write);
91457
91458 static inline void unmap_shared_mapping_range(struct address_space *mapping,
91459 loff_t const holebegin, loff_t const holelen)
91460@@ -1254,9 +1260,9 @@ static inline int fixup_user_fault(struct task_struct *tsk,
91461 }
91462 #endif
91463
91464-extern int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write);
91465-extern int access_remote_vm(struct mm_struct *mm, unsigned long addr,
91466- void *buf, int len, int write);
91467+extern ssize_t access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, size_t len, int write);
91468+extern ssize_t access_remote_vm(struct mm_struct *mm, unsigned long addr,
91469+ void *buf, size_t len, int write);
91470
91471 long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
91472 unsigned long start, unsigned long nr_pages,
91473@@ -1299,34 +1305,6 @@ int set_page_dirty_lock(struct page *page);
91474 int clear_page_dirty_for_io(struct page *page);
91475 int get_cmdline(struct task_struct *task, char *buffer, int buflen);
91476
91477-/* Is the vma a continuation of the stack vma above it? */
91478-static inline int vma_growsdown(struct vm_area_struct *vma, unsigned long addr)
91479-{
91480- return vma && (vma->vm_end == addr) && (vma->vm_flags & VM_GROWSDOWN);
91481-}
91482-
91483-static inline int stack_guard_page_start(struct vm_area_struct *vma,
91484- unsigned long addr)
91485-{
91486- return (vma->vm_flags & VM_GROWSDOWN) &&
91487- (vma->vm_start == addr) &&
91488- !vma_growsdown(vma->vm_prev, addr);
91489-}
91490-
91491-/* Is the vma a continuation of the stack vma below it? */
91492-static inline int vma_growsup(struct vm_area_struct *vma, unsigned long addr)
91493-{
91494- return vma && (vma->vm_start == addr) && (vma->vm_flags & VM_GROWSUP);
91495-}
91496-
91497-static inline int stack_guard_page_end(struct vm_area_struct *vma,
91498- unsigned long addr)
91499-{
91500- return (vma->vm_flags & VM_GROWSUP) &&
91501- (vma->vm_end == addr) &&
91502- !vma_growsup(vma->vm_next, addr);
91503-}
91504-
91505 extern struct task_struct *task_of_stack(struct task_struct *task,
91506 struct vm_area_struct *vma, bool in_group);
91507
91508@@ -1449,8 +1427,15 @@ static inline int __pud_alloc(struct mm_struct *mm, pgd_t *pgd,
91509 {
91510 return 0;
91511 }
91512+
91513+static inline int __pud_alloc_kernel(struct mm_struct *mm, pgd_t *pgd,
91514+ unsigned long address)
91515+{
91516+ return 0;
91517+}
91518 #else
91519 int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address);
91520+int __pud_alloc_kernel(struct mm_struct *mm, pgd_t *pgd, unsigned long address);
91521 #endif
91522
91523 #if defined(__PAGETABLE_PMD_FOLDED) || !defined(CONFIG_MMU)
91524@@ -1460,6 +1445,12 @@ static inline int __pmd_alloc(struct mm_struct *mm, pud_t *pud,
91525 return 0;
91526 }
91527
91528+static inline int __pmd_alloc_kernel(struct mm_struct *mm, pud_t *pud,
91529+ unsigned long address)
91530+{
91531+ return 0;
91532+}
91533+
91534 static inline void mm_nr_pmds_init(struct mm_struct *mm) {}
91535
91536 static inline unsigned long mm_nr_pmds(struct mm_struct *mm)
91537@@ -1472,6 +1463,7 @@ static inline void mm_dec_nr_pmds(struct mm_struct *mm) {}
91538
91539 #else
91540 int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address);
91541+int __pmd_alloc_kernel(struct mm_struct *mm, pud_t *pud, unsigned long address);
91542
91543 static inline void mm_nr_pmds_init(struct mm_struct *mm)
91544 {
91545@@ -1509,11 +1501,23 @@ static inline pud_t *pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long a
91546 NULL: pud_offset(pgd, address);
91547 }
91548
91549+static inline pud_t *pud_alloc_kernel(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
91550+{
91551+ return (unlikely(pgd_none(*pgd)) && __pud_alloc_kernel(mm, pgd, address))?
91552+ NULL: pud_offset(pgd, address);
91553+}
91554+
91555 static inline pmd_t *pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
91556 {
91557 return (unlikely(pud_none(*pud)) && __pmd_alloc(mm, pud, address))?
91558 NULL: pmd_offset(pud, address);
91559 }
91560+
91561+static inline pmd_t *pmd_alloc_kernel(struct mm_struct *mm, pud_t *pud, unsigned long address)
91562+{
91563+ return (unlikely(pud_none(*pud)) && __pmd_alloc_kernel(mm, pud, address))?
91564+ NULL: pmd_offset(pud, address);
91565+}
91566 #endif /* CONFIG_MMU && !__ARCH_HAS_4LEVEL_HACK */
91567
91568 #if USE_SPLIT_PTE_PTLOCKS
91569@@ -1890,12 +1894,23 @@ extern struct vm_area_struct *copy_vma(struct vm_area_struct **,
91570 bool *need_rmap_locks);
91571 extern void exit_mmap(struct mm_struct *);
91572
91573+#if defined(CONFIG_GRKERNSEC) && (defined(CONFIG_GRKERNSEC_RESLOG) || !defined(CONFIG_GRKERNSEC_NO_RBAC))
91574+extern void gr_learn_resource(const struct task_struct *task, const int res,
91575+ const unsigned long wanted, const int gt);
91576+#else
91577+static inline void gr_learn_resource(const struct task_struct *task, const int res,
91578+ const unsigned long wanted, const int gt)
91579+{
91580+}
91581+#endif
91582+
91583 static inline int check_data_rlimit(unsigned long rlim,
91584 unsigned long new,
91585 unsigned long start,
91586 unsigned long end_data,
91587 unsigned long start_data)
91588 {
91589+ gr_learn_resource(current, RLIMIT_DATA, (new - start) + (end_data - start_data), 1);
91590 if (rlim < RLIM_INFINITY) {
91591 if (((new - start) + (end_data - start_data)) > rlim)
91592 return -ENOSPC;
91593@@ -1920,7 +1935,7 @@ extern int install_special_mapping(struct mm_struct *mm,
91594 unsigned long addr, unsigned long len,
91595 unsigned long flags, struct page **pages);
91596
91597-extern unsigned long get_unmapped_area(struct file *, unsigned long, unsigned long, unsigned long, unsigned long);
91598+extern unsigned long get_unmapped_area(struct file *, unsigned long, unsigned long, unsigned long, unsigned long) __intentional_overflow(-1);
91599
91600 extern unsigned long mmap_region(struct file *file, unsigned long addr,
91601 unsigned long len, vm_flags_t vm_flags, unsigned long pgoff);
91602@@ -1928,6 +1943,7 @@ extern unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
91603 unsigned long len, unsigned long prot, unsigned long flags,
91604 unsigned long pgoff, unsigned long *populate);
91605 extern int do_munmap(struct mm_struct *, unsigned long, size_t);
91606+extern int __do_munmap(struct mm_struct *, unsigned long, size_t);
91607
91608 #ifdef CONFIG_MMU
91609 extern int __mm_populate(unsigned long addr, unsigned long len,
91610@@ -1956,10 +1972,11 @@ struct vm_unmapped_area_info {
91611 unsigned long high_limit;
91612 unsigned long align_mask;
91613 unsigned long align_offset;
91614+ unsigned long threadstack_offset;
91615 };
91616
91617-extern unsigned long unmapped_area(struct vm_unmapped_area_info *info);
91618-extern unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info);
91619+extern unsigned long unmapped_area(const struct vm_unmapped_area_info *info);
91620+extern unsigned long unmapped_area_topdown(const struct vm_unmapped_area_info *info);
91621
91622 /*
91623 * Search for an unmapped address range.
91624@@ -1971,7 +1988,7 @@ extern unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info);
91625 * - satisfies (begin_addr & align_mask) == (align_offset & align_mask)
91626 */
91627 static inline unsigned long
91628-vm_unmapped_area(struct vm_unmapped_area_info *info)
91629+vm_unmapped_area(const struct vm_unmapped_area_info *info)
91630 {
91631 if (!(info->flags & VM_UNMAPPED_AREA_TOPDOWN))
91632 return unmapped_area(info);
91633@@ -2033,6 +2050,10 @@ extern struct vm_area_struct * find_vma(struct mm_struct * mm, unsigned long add
91634 extern struct vm_area_struct * find_vma_prev(struct mm_struct * mm, unsigned long addr,
91635 struct vm_area_struct **pprev);
91636
91637+extern struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma);
91638+extern __must_check long pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma);
91639+extern void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl);
91640+
91641 /* Look up the first VMA which intersects the interval start_addr..end_addr-1,
91642 NULL if none. Assume start_addr < end_addr. */
91643 static inline struct vm_area_struct * find_vma_intersection(struct mm_struct * mm, unsigned long start_addr, unsigned long end_addr)
91644@@ -2062,10 +2083,10 @@ static inline struct vm_area_struct *find_exact_vma(struct mm_struct *mm,
91645 }
91646
91647 #ifdef CONFIG_MMU
91648-pgprot_t vm_get_page_prot(unsigned long vm_flags);
91649+pgprot_t vm_get_page_prot(vm_flags_t vm_flags);
91650 void vma_set_page_prot(struct vm_area_struct *vma);
91651 #else
91652-static inline pgprot_t vm_get_page_prot(unsigned long vm_flags)
91653+static inline pgprot_t vm_get_page_prot(vm_flags_t vm_flags)
91654 {
91655 return __pgprot(0);
91656 }
91657@@ -2127,6 +2148,11 @@ void vm_stat_account(struct mm_struct *, unsigned long, struct file *, long);
91658 static inline void vm_stat_account(struct mm_struct *mm,
91659 unsigned long flags, struct file *file, long pages)
91660 {
91661+
91662+#ifdef CONFIG_PAX_RANDMMAP
91663+ if (!(mm->pax_flags & MF_PAX_RANDMMAP) || (flags & (VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)))
91664+#endif
91665+
91666 mm->total_vm += pages;
91667 }
91668 #endif /* CONFIG_PROC_FS */
91669@@ -2229,7 +2255,7 @@ extern int unpoison_memory(unsigned long pfn);
91670 extern int sysctl_memory_failure_early_kill;
91671 extern int sysctl_memory_failure_recovery;
91672 extern void shake_page(struct page *p, int access);
91673-extern atomic_long_t num_poisoned_pages;
91674+extern atomic_long_unchecked_t num_poisoned_pages;
91675 extern int soft_offline_page(struct page *page, int flags);
91676
91677 #if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLBFS)
91678@@ -2280,5 +2306,11 @@ void __init setup_nr_node_ids(void);
91679 static inline void setup_nr_node_ids(void) {}
91680 #endif
91681
91682+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
91683+extern void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot);
91684+#else
91685+static inline void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot) {}
91686+#endif
91687+
91688 #endif /* __KERNEL__ */
91689 #endif /* _LINUX_MM_H */
91690diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
91691index 199a03a..7328440 100644
91692--- a/include/linux/mm_types.h
91693+++ b/include/linux/mm_types.h
91694@@ -313,7 +313,9 @@ struct vm_area_struct {
91695 #ifdef CONFIG_NUMA
91696 struct mempolicy *vm_policy; /* NUMA policy for the VMA */
91697 #endif
91698-};
91699+
91700+ struct vm_area_struct *vm_mirror;/* PaX: mirror vma or NULL */
91701+} __randomize_layout;
91702
91703 struct core_thread {
91704 struct task_struct *task;
91705@@ -464,7 +466,25 @@ struct mm_struct {
91706 /* address of the bounds directory */
91707 void __user *bd_addr;
91708 #endif
91709-};
91710+
91711+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
91712+ unsigned long pax_flags;
91713+#endif
91714+
91715+#ifdef CONFIG_PAX_DLRESOLVE
91716+ unsigned long call_dl_resolve;
91717+#endif
91718+
91719+#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
91720+ unsigned long call_syscall;
91721+#endif
91722+
91723+#ifdef CONFIG_PAX_ASLR
91724+ unsigned long delta_mmap; /* randomized offset */
91725+ unsigned long delta_stack; /* randomized offset */
91726+#endif
91727+
91728+} __randomize_layout;
91729
91730 static inline void mm_init_cpumask(struct mm_struct *mm)
91731 {
91732diff --git a/include/linux/mmc/core.h b/include/linux/mmc/core.h
91733index 160448f..7b332b7 100644
91734--- a/include/linux/mmc/core.h
91735+++ b/include/linux/mmc/core.h
91736@@ -79,7 +79,7 @@ struct mmc_command {
91737 #define mmc_cmd_type(cmd) ((cmd)->flags & MMC_CMD_MASK)
91738
91739 unsigned int retries; /* max number of retries */
91740- unsigned int error; /* command error */
91741+ int error; /* command error */
91742
91743 /*
91744 * Standard errno values are used for errors, but some have specific
91745diff --git a/include/linux/mmiotrace.h b/include/linux/mmiotrace.h
91746index c5d5278..f0b68c8 100644
91747--- a/include/linux/mmiotrace.h
91748+++ b/include/linux/mmiotrace.h
91749@@ -46,7 +46,7 @@ extern int kmmio_handler(struct pt_regs *regs, unsigned long addr);
91750 /* Called from ioremap.c */
91751 extern void mmiotrace_ioremap(resource_size_t offset, unsigned long size,
91752 void __iomem *addr);
91753-extern void mmiotrace_iounmap(volatile void __iomem *addr);
91754+extern void mmiotrace_iounmap(const volatile void __iomem *addr);
91755
91756 /* For anyone to insert markers. Remember trailing newline. */
91757 extern __printf(1, 2) int mmiotrace_printk(const char *fmt, ...);
91758@@ -66,7 +66,7 @@ static inline void mmiotrace_ioremap(resource_size_t offset,
91759 {
91760 }
91761
91762-static inline void mmiotrace_iounmap(volatile void __iomem *addr)
91763+static inline void mmiotrace_iounmap(const volatile void __iomem *addr)
91764 {
91765 }
91766
91767diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
91768index 2782df4..abe756e 100644
91769--- a/include/linux/mmzone.h
91770+++ b/include/linux/mmzone.h
91771@@ -526,7 +526,7 @@ struct zone {
91772
91773 ZONE_PADDING(_pad3_)
91774 /* Zone statistics */
91775- atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
91776+ atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
91777 } ____cacheline_internodealigned_in_smp;
91778
91779 enum zone_flags {
91780diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h
91781index e530533..c9620c7 100644
91782--- a/include/linux/mod_devicetable.h
91783+++ b/include/linux/mod_devicetable.h
91784@@ -139,7 +139,7 @@ struct usb_device_id {
91785 #define USB_DEVICE_ID_MATCH_INT_PROTOCOL 0x0200
91786 #define USB_DEVICE_ID_MATCH_INT_NUMBER 0x0400
91787
91788-#define HID_ANY_ID (~0)
91789+#define HID_ANY_ID (~0U)
91790 #define HID_BUS_ANY 0xffff
91791 #define HID_GROUP_ANY 0x0000
91792
91793@@ -470,7 +470,7 @@ struct dmi_system_id {
91794 const char *ident;
91795 struct dmi_strmatch matches[4];
91796 void *driver_data;
91797-};
91798+} __do_const;
91799 /*
91800 * struct dmi_device_id appears during expansion of
91801 * "MODULE_DEVICE_TABLE(dmi, x)". Compiler doesn't look inside it
91802diff --git a/include/linux/module.h b/include/linux/module.h
91803index b03485b..a26974f 100644
91804--- a/include/linux/module.h
91805+++ b/include/linux/module.h
91806@@ -17,9 +17,11 @@
91807 #include <linux/moduleparam.h>
91808 #include <linux/jump_label.h>
91809 #include <linux/export.h>
91810+#include <linux/fs.h>
91811
91812 #include <linux/percpu.h>
91813 #include <asm/module.h>
91814+#include <asm/pgtable.h>
91815
91816 /* In stripped ARM and x86-64 modules, ~ is surprisingly rare. */
91817 #define MODULE_SIG_STRING "~Module signature appended~\n"
91818@@ -42,7 +44,7 @@ struct module_kobject {
91819 struct kobject *drivers_dir;
91820 struct module_param_attrs *mp;
91821 struct completion *kobj_completion;
91822-};
91823+} __randomize_layout;
91824
91825 struct module_attribute {
91826 struct attribute attr;
91827@@ -54,12 +56,13 @@ struct module_attribute {
91828 int (*test)(struct module *);
91829 void (*free)(struct module *);
91830 };
91831+typedef struct module_attribute __no_const module_attribute_no_const;
91832
91833 struct module_version_attribute {
91834 struct module_attribute mattr;
91835 const char *module_name;
91836 const char *version;
91837-} __attribute__ ((__aligned__(sizeof(void *))));
91838+} __do_const __attribute__ ((__aligned__(sizeof(void *))));
91839
91840 extern ssize_t __modver_version_show(struct module_attribute *,
91841 struct module_kobject *, char *);
91842@@ -221,7 +224,7 @@ struct module {
91843
91844 /* Sysfs stuff. */
91845 struct module_kobject mkobj;
91846- struct module_attribute *modinfo_attrs;
91847+ module_attribute_no_const *modinfo_attrs;
91848 const char *version;
91849 const char *srcversion;
91850 struct kobject *holders_dir;
91851@@ -270,19 +273,16 @@ struct module {
91852 int (*init)(void);
91853
91854 /* If this is non-NULL, vfree after init() returns */
91855- void *module_init;
91856+ void *module_init_rx, *module_init_rw;
91857
91858 /* Here is the actual code + data, vfree'd on unload. */
91859- void *module_core;
91860+ void *module_core_rx, *module_core_rw;
91861
91862 /* Here are the sizes of the init and core sections */
91863- unsigned int init_size, core_size;
91864+ unsigned int init_size_rw, core_size_rw;
91865
91866 /* The size of the executable code in each section. */
91867- unsigned int init_text_size, core_text_size;
91868-
91869- /* Size of RO sections of the module (text+rodata) */
91870- unsigned int init_ro_size, core_ro_size;
91871+ unsigned int init_size_rx, core_size_rx;
91872
91873 /* Arch-specific module values */
91874 struct mod_arch_specific arch;
91875@@ -338,6 +338,10 @@ struct module {
91876 #ifdef CONFIG_EVENT_TRACING
91877 struct ftrace_event_call **trace_events;
91878 unsigned int num_trace_events;
91879+ struct file_operations trace_id;
91880+ struct file_operations trace_enable;
91881+ struct file_operations trace_format;
91882+ struct file_operations trace_filter;
91883 #endif
91884 #ifdef CONFIG_FTRACE_MCOUNT_RECORD
91885 unsigned int num_ftrace_callsites;
91886@@ -365,7 +369,7 @@ struct module {
91887 ctor_fn_t *ctors;
91888 unsigned int num_ctors;
91889 #endif
91890-};
91891+} __randomize_layout;
91892 #ifndef MODULE_ARCH_INIT
91893 #define MODULE_ARCH_INIT {}
91894 #endif
91895@@ -386,18 +390,48 @@ bool is_module_address(unsigned long addr);
91896 bool is_module_percpu_address(unsigned long addr);
91897 bool is_module_text_address(unsigned long addr);
91898
91899+static inline int within_module_range(unsigned long addr, void *start, unsigned long size)
91900+{
91901+
91902+#ifdef CONFIG_PAX_KERNEXEC
91903+ if (ktla_ktva(addr) >= (unsigned long)start &&
91904+ ktla_ktva(addr) < (unsigned long)start + size)
91905+ return 1;
91906+#endif
91907+
91908+ return ((void *)addr >= start && (void *)addr < start + size);
91909+}
91910+
91911+static inline int within_module_core_rx(unsigned long addr, const struct module *mod)
91912+{
91913+ return within_module_range(addr, mod->module_core_rx, mod->core_size_rx);
91914+}
91915+
91916+static inline int within_module_core_rw(unsigned long addr, const struct module *mod)
91917+{
91918+ return within_module_range(addr, mod->module_core_rw, mod->core_size_rw);
91919+}
91920+
91921+static inline int within_module_init_rx(unsigned long addr, const struct module *mod)
91922+{
91923+ return within_module_range(addr, mod->module_init_rx, mod->init_size_rx);
91924+}
91925+
91926+static inline int within_module_init_rw(unsigned long addr, const struct module *mod)
91927+{
91928+ return within_module_range(addr, mod->module_init_rw, mod->init_size_rw);
91929+}
91930+
91931 static inline bool within_module_core(unsigned long addr,
91932 const struct module *mod)
91933 {
91934- return (unsigned long)mod->module_core <= addr &&
91935- addr < (unsigned long)mod->module_core + mod->core_size;
91936+ return within_module_core_rx(addr, mod) || within_module_core_rw(addr, mod);
91937 }
91938
91939 static inline bool within_module_init(unsigned long addr,
91940 const struct module *mod)
91941 {
91942- return (unsigned long)mod->module_init <= addr &&
91943- addr < (unsigned long)mod->module_init + mod->init_size;
91944+ return within_module_init_rx(addr, mod) || within_module_init_rw(addr, mod);
91945 }
91946
91947 static inline bool within_module(unsigned long addr, const struct module *mod)
91948diff --git a/include/linux/moduleloader.h b/include/linux/moduleloader.h
91949index 4d0cb9b..3169ac7 100644
91950--- a/include/linux/moduleloader.h
91951+++ b/include/linux/moduleloader.h
91952@@ -25,9 +25,21 @@ unsigned int arch_mod_section_prepend(struct module *mod, unsigned int section);
91953 sections. Returns NULL on failure. */
91954 void *module_alloc(unsigned long size);
91955
91956+#ifdef CONFIG_PAX_KERNEXEC
91957+void *module_alloc_exec(unsigned long size);
91958+#else
91959+#define module_alloc_exec(x) module_alloc(x)
91960+#endif
91961+
91962 /* Free memory returned from module_alloc. */
91963 void module_memfree(void *module_region);
91964
91965+#ifdef CONFIG_PAX_KERNEXEC
91966+void module_memfree_exec(void *module_region);
91967+#else
91968+#define module_memfree_exec(x) module_memfree((x))
91969+#endif
91970+
91971 /*
91972 * Apply the given relocation to the (simplified) ELF. Return -error
91973 * or 0.
91974@@ -45,8 +57,10 @@ static inline int apply_relocate(Elf_Shdr *sechdrs,
91975 unsigned int relsec,
91976 struct module *me)
91977 {
91978+#ifdef CONFIG_MODULES
91979 printk(KERN_ERR "module %s: REL relocation unsupported\n",
91980 module_name(me));
91981+#endif
91982 return -ENOEXEC;
91983 }
91984 #endif
91985@@ -68,8 +82,10 @@ static inline int apply_relocate_add(Elf_Shdr *sechdrs,
91986 unsigned int relsec,
91987 struct module *me)
91988 {
91989+#ifdef CONFIG_MODULES
91990 printk(KERN_ERR "module %s: REL relocation unsupported\n",
91991 module_name(me));
91992+#endif
91993 return -ENOEXEC;
91994 }
91995 #endif
91996diff --git a/include/linux/moduleparam.h b/include/linux/moduleparam.h
91997index 1c9effa..1160bdd 100644
91998--- a/include/linux/moduleparam.h
91999+++ b/include/linux/moduleparam.h
92000@@ -323,7 +323,7 @@ static inline void __kernel_param_unlock(void)
92001 * @len is usually just sizeof(string).
92002 */
92003 #define module_param_string(name, string, len, perm) \
92004- static const struct kparam_string __param_string_##name \
92005+ static const struct kparam_string __param_string_##name __used \
92006 = { len, string }; \
92007 __module_param_call(MODULE_PARAM_PREFIX, name, \
92008 &param_ops_string, \
92009@@ -467,7 +467,7 @@ extern int param_set_bint(const char *val, const struct kernel_param *kp);
92010 */
92011 #define module_param_array_named(name, array, type, nump, perm) \
92012 param_check_##type(name, &(array)[0]); \
92013- static const struct kparam_array __param_arr_##name \
92014+ static const struct kparam_array __param_arr_##name __used \
92015 = { .max = ARRAY_SIZE(array), .num = nump, \
92016 .ops = &param_ops_##type, \
92017 .elemsize = sizeof(array[0]), .elem = array }; \
92018diff --git a/include/linux/mount.h b/include/linux/mount.h
92019index 564beee..653be6f 100644
92020--- a/include/linux/mount.h
92021+++ b/include/linux/mount.h
92022@@ -67,7 +67,7 @@ struct vfsmount {
92023 struct dentry *mnt_root; /* root of the mounted tree */
92024 struct super_block *mnt_sb; /* pointer to superblock */
92025 int mnt_flags;
92026-};
92027+} __randomize_layout;
92028
92029 struct file; /* forward dec */
92030 struct path;
92031diff --git a/include/linux/namei.h b/include/linux/namei.h
92032index c899077..b9a2010 100644
92033--- a/include/linux/namei.h
92034+++ b/include/linux/namei.h
92035@@ -71,8 +71,8 @@ extern struct dentry *lock_rename(struct dentry *, struct dentry *);
92036 extern void unlock_rename(struct dentry *, struct dentry *);
92037
92038 extern void nd_jump_link(struct nameidata *nd, struct path *path);
92039-extern void nd_set_link(struct nameidata *nd, char *path);
92040-extern char *nd_get_link(struct nameidata *nd);
92041+extern void nd_set_link(struct nameidata *nd, const char *path);
92042+extern const char *nd_get_link(const struct nameidata *nd);
92043
92044 static inline void nd_terminate_link(void *name, size_t len, size_t maxlen)
92045 {
92046diff --git a/include/linux/net.h b/include/linux/net.h
92047index 17d8339..81656c0 100644
92048--- a/include/linux/net.h
92049+++ b/include/linux/net.h
92050@@ -192,7 +192,7 @@ struct net_proto_family {
92051 int (*create)(struct net *net, struct socket *sock,
92052 int protocol, int kern);
92053 struct module *owner;
92054-};
92055+} __do_const;
92056
92057 struct iovec;
92058 struct kvec;
92059diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
92060index 2787388..1dd8e88 100644
92061--- a/include/linux/netdevice.h
92062+++ b/include/linux/netdevice.h
92063@@ -1198,6 +1198,7 @@ struct net_device_ops {
92064 u8 state);
92065 #endif
92066 };
92067+typedef struct net_device_ops __no_const net_device_ops_no_const;
92068
92069 /**
92070 * enum net_device_priv_flags - &struct net_device priv_flags
92071@@ -1546,10 +1547,10 @@ struct net_device {
92072
92073 struct net_device_stats stats;
92074
92075- atomic_long_t rx_dropped;
92076- atomic_long_t tx_dropped;
92077+ atomic_long_unchecked_t rx_dropped;
92078+ atomic_long_unchecked_t tx_dropped;
92079
92080- atomic_t carrier_changes;
92081+ atomic_unchecked_t carrier_changes;
92082
92083 #ifdef CONFIG_WIRELESS_EXT
92084 const struct iw_handler_def * wireless_handlers;
92085diff --git a/include/linux/netfilter.h b/include/linux/netfilter.h
92086index 2517ece..0bbfcfb 100644
92087--- a/include/linux/netfilter.h
92088+++ b/include/linux/netfilter.h
92089@@ -85,7 +85,7 @@ struct nf_sockopt_ops {
92090 #endif
92091 /* Use the module struct to lock set/get code in place */
92092 struct module *owner;
92093-};
92094+} __do_const;
92095
92096 /* Function to register/unregister hook points. */
92097 int nf_register_hook(struct nf_hook_ops *reg);
92098diff --git a/include/linux/netfilter/nfnetlink.h b/include/linux/netfilter/nfnetlink.h
92099index e955d47..04a5338 100644
92100--- a/include/linux/netfilter/nfnetlink.h
92101+++ b/include/linux/netfilter/nfnetlink.h
92102@@ -19,7 +19,7 @@ struct nfnl_callback {
92103 const struct nlattr * const cda[]);
92104 const struct nla_policy *policy; /* netlink attribute policy */
92105 const u_int16_t attr_count; /* number of nlattr's */
92106-};
92107+} __do_const;
92108
92109 struct nfnetlink_subsystem {
92110 const char *name;
92111diff --git a/include/linux/netfilter/xt_gradm.h b/include/linux/netfilter/xt_gradm.h
92112new file mode 100644
92113index 0000000..33f4af8
92114--- /dev/null
92115+++ b/include/linux/netfilter/xt_gradm.h
92116@@ -0,0 +1,9 @@
92117+#ifndef _LINUX_NETFILTER_XT_GRADM_H
92118+#define _LINUX_NETFILTER_XT_GRADM_H 1
92119+
92120+struct xt_gradm_mtinfo {
92121+ __u16 flags;
92122+ __u16 invflags;
92123+};
92124+
92125+#endif
92126diff --git a/include/linux/nls.h b/include/linux/nls.h
92127index 520681b..2b7fabb 100644
92128--- a/include/linux/nls.h
92129+++ b/include/linux/nls.h
92130@@ -31,7 +31,7 @@ struct nls_table {
92131 const unsigned char *charset2upper;
92132 struct module *owner;
92133 struct nls_table *next;
92134-};
92135+} __do_const;
92136
92137 /* this value hold the maximum octet of charset */
92138 #define NLS_MAX_CHARSET_SIZE 6 /* for UTF-8 */
92139@@ -46,7 +46,7 @@ enum utf16_endian {
92140 /* nls_base.c */
92141 extern int __register_nls(struct nls_table *, struct module *);
92142 extern int unregister_nls(struct nls_table *);
92143-extern struct nls_table *load_nls(char *);
92144+extern struct nls_table *load_nls(const char *);
92145 extern void unload_nls(struct nls_table *);
92146 extern struct nls_table *load_nls_default(void);
92147 #define register_nls(nls) __register_nls((nls), THIS_MODULE)
92148diff --git a/include/linux/notifier.h b/include/linux/notifier.h
92149index d14a4c3..a078786 100644
92150--- a/include/linux/notifier.h
92151+++ b/include/linux/notifier.h
92152@@ -54,7 +54,8 @@ struct notifier_block {
92153 notifier_fn_t notifier_call;
92154 struct notifier_block __rcu *next;
92155 int priority;
92156-};
92157+} __do_const;
92158+typedef struct notifier_block __no_const notifier_block_no_const;
92159
92160 struct atomic_notifier_head {
92161 spinlock_t lock;
92162diff --git a/include/linux/oprofile.h b/include/linux/oprofile.h
92163index b2a0f15..4d7da32 100644
92164--- a/include/linux/oprofile.h
92165+++ b/include/linux/oprofile.h
92166@@ -138,9 +138,9 @@ int oprofilefs_create_ulong(struct dentry * root,
92167 int oprofilefs_create_ro_ulong(struct dentry * root,
92168 char const * name, ulong * val);
92169
92170-/** Create a file for read-only access to an atomic_t. */
92171+/** Create a file for read-only access to an atomic_unchecked_t. */
92172 int oprofilefs_create_ro_atomic(struct dentry * root,
92173- char const * name, atomic_t * val);
92174+ char const * name, atomic_unchecked_t * val);
92175
92176 /** create a directory */
92177 struct dentry *oprofilefs_mkdir(struct dentry *parent, char const *name);
92178diff --git a/include/linux/padata.h b/include/linux/padata.h
92179index 4386946..f50c615 100644
92180--- a/include/linux/padata.h
92181+++ b/include/linux/padata.h
92182@@ -129,7 +129,7 @@ struct parallel_data {
92183 struct padata_serial_queue __percpu *squeue;
92184 atomic_t reorder_objects;
92185 atomic_t refcnt;
92186- atomic_t seq_nr;
92187+ atomic_unchecked_t seq_nr;
92188 struct padata_cpumask cpumask;
92189 spinlock_t lock ____cacheline_aligned;
92190 unsigned int processed;
92191diff --git a/include/linux/path.h b/include/linux/path.h
92192index d137218..be0c176 100644
92193--- a/include/linux/path.h
92194+++ b/include/linux/path.h
92195@@ -1,13 +1,15 @@
92196 #ifndef _LINUX_PATH_H
92197 #define _LINUX_PATH_H
92198
92199+#include <linux/compiler.h>
92200+
92201 struct dentry;
92202 struct vfsmount;
92203
92204 struct path {
92205 struct vfsmount *mnt;
92206 struct dentry *dentry;
92207-};
92208+} __randomize_layout;
92209
92210 extern void path_get(const struct path *);
92211 extern void path_put(const struct path *);
92212diff --git a/include/linux/pci_hotplug.h b/include/linux/pci_hotplug.h
92213index 8c78950..0d74ed9 100644
92214--- a/include/linux/pci_hotplug.h
92215+++ b/include/linux/pci_hotplug.h
92216@@ -71,7 +71,8 @@ struct hotplug_slot_ops {
92217 int (*get_latch_status) (struct hotplug_slot *slot, u8 *value);
92218 int (*get_adapter_status) (struct hotplug_slot *slot, u8 *value);
92219 int (*reset_slot) (struct hotplug_slot *slot, int probe);
92220-};
92221+} __do_const;
92222+typedef struct hotplug_slot_ops __no_const hotplug_slot_ops_no_const;
92223
92224 /**
92225 * struct hotplug_slot_info - used to notify the hotplug pci core of the state of the slot
92226diff --git a/include/linux/percpu.h b/include/linux/percpu.h
92227index caebf2a..4c3ae9d 100644
92228--- a/include/linux/percpu.h
92229+++ b/include/linux/percpu.h
92230@@ -34,7 +34,7 @@
92231 * preallocate for this. Keep PERCPU_DYNAMIC_RESERVE equal to or
92232 * larger than PERCPU_DYNAMIC_EARLY_SIZE.
92233 */
92234-#define PERCPU_DYNAMIC_EARLY_SLOTS 128
92235+#define PERCPU_DYNAMIC_EARLY_SLOTS 256
92236 #define PERCPU_DYNAMIC_EARLY_SIZE (12 << 10)
92237
92238 /*
92239diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
92240index 2b62198..2b74233 100644
92241--- a/include/linux/perf_event.h
92242+++ b/include/linux/perf_event.h
92243@@ -343,8 +343,8 @@ struct perf_event {
92244
92245 enum perf_event_active_state state;
92246 unsigned int attach_state;
92247- local64_t count;
92248- atomic64_t child_count;
92249+ local64_t count; /* PaX: fix it one day */
92250+ atomic64_unchecked_t child_count;
92251
92252 /*
92253 * These are the total time in nanoseconds that the event
92254@@ -395,8 +395,8 @@ struct perf_event {
92255 * These accumulate total time (in nanoseconds) that children
92256 * events have been enabled and running, respectively.
92257 */
92258- atomic64_t child_total_time_enabled;
92259- atomic64_t child_total_time_running;
92260+ atomic64_unchecked_t child_total_time_enabled;
92261+ atomic64_unchecked_t child_total_time_running;
92262
92263 /*
92264 * Protect attach/detach and child_list:
92265@@ -752,7 +752,7 @@ static inline void perf_callchain_store(struct perf_callchain_entry *entry, u64
92266 entry->ip[entry->nr++] = ip;
92267 }
92268
92269-extern int sysctl_perf_event_paranoid;
92270+extern int sysctl_perf_event_legitimately_concerned;
92271 extern int sysctl_perf_event_mlock;
92272 extern int sysctl_perf_event_sample_rate;
92273 extern int sysctl_perf_cpu_time_max_percent;
92274@@ -767,19 +767,24 @@ extern int perf_cpu_time_max_percent_handler(struct ctl_table *table, int write,
92275 loff_t *ppos);
92276
92277
92278+static inline bool perf_paranoid_any(void)
92279+{
92280+ return sysctl_perf_event_legitimately_concerned > 2;
92281+}
92282+
92283 static inline bool perf_paranoid_tracepoint_raw(void)
92284 {
92285- return sysctl_perf_event_paranoid > -1;
92286+ return sysctl_perf_event_legitimately_concerned > -1;
92287 }
92288
92289 static inline bool perf_paranoid_cpu(void)
92290 {
92291- return sysctl_perf_event_paranoid > 0;
92292+ return sysctl_perf_event_legitimately_concerned > 0;
92293 }
92294
92295 static inline bool perf_paranoid_kernel(void)
92296 {
92297- return sysctl_perf_event_paranoid > 1;
92298+ return sysctl_perf_event_legitimately_concerned > 1;
92299 }
92300
92301 extern void perf_event_init(void);
92302@@ -912,7 +917,7 @@ struct perf_pmu_events_attr {
92303 struct device_attribute attr;
92304 u64 id;
92305 const char *event_str;
92306-};
92307+} __do_const;
92308
92309 ssize_t perf_event_sysfs_show(struct device *dev, struct device_attribute *attr,
92310 char *page);
92311diff --git a/include/linux/pid_namespace.h b/include/linux/pid_namespace.h
92312index 918b117..7af374b7 100644
92313--- a/include/linux/pid_namespace.h
92314+++ b/include/linux/pid_namespace.h
92315@@ -45,7 +45,7 @@ struct pid_namespace {
92316 int hide_pid;
92317 int reboot; /* group exit code if this pidns was rebooted */
92318 struct ns_common ns;
92319-};
92320+} __randomize_layout;
92321
92322 extern struct pid_namespace init_pid_ns;
92323
92324diff --git a/include/linux/pipe_fs_i.h b/include/linux/pipe_fs_i.h
92325index eb8b8ac..62649e1 100644
92326--- a/include/linux/pipe_fs_i.h
92327+++ b/include/linux/pipe_fs_i.h
92328@@ -47,10 +47,10 @@ struct pipe_inode_info {
92329 struct mutex mutex;
92330 wait_queue_head_t wait;
92331 unsigned int nrbufs, curbuf, buffers;
92332- unsigned int readers;
92333- unsigned int writers;
92334- unsigned int files;
92335- unsigned int waiting_writers;
92336+ atomic_t readers;
92337+ atomic_t writers;
92338+ atomic_t files;
92339+ atomic_t waiting_writers;
92340 unsigned int r_counter;
92341 unsigned int w_counter;
92342 struct page *tmp_page;
92343diff --git a/include/linux/pm.h b/include/linux/pm.h
92344index e2f1be6..78a0506 100644
92345--- a/include/linux/pm.h
92346+++ b/include/linux/pm.h
92347@@ -608,6 +608,7 @@ struct dev_pm_domain {
92348 struct dev_pm_ops ops;
92349 void (*detach)(struct device *dev, bool power_off);
92350 };
92351+typedef struct dev_pm_domain __no_const dev_pm_domain_no_const;
92352
92353 /*
92354 * The PM_EVENT_ messages are also used by drivers implementing the legacy
92355diff --git a/include/linux/pm_domain.h b/include/linux/pm_domain.h
92356index 080e778..cbdaef7 100644
92357--- a/include/linux/pm_domain.h
92358+++ b/include/linux/pm_domain.h
92359@@ -39,11 +39,11 @@ struct gpd_dev_ops {
92360 int (*save_state)(struct device *dev);
92361 int (*restore_state)(struct device *dev);
92362 bool (*active_wakeup)(struct device *dev);
92363-};
92364+} __no_const;
92365
92366 struct gpd_cpuidle_data {
92367 unsigned int saved_exit_latency;
92368- struct cpuidle_state *idle_state;
92369+ cpuidle_state_no_const *idle_state;
92370 };
92371
92372 struct generic_pm_domain {
92373diff --git a/include/linux/pm_runtime.h b/include/linux/pm_runtime.h
92374index 30e84d4..22278b4 100644
92375--- a/include/linux/pm_runtime.h
92376+++ b/include/linux/pm_runtime.h
92377@@ -115,7 +115,7 @@ static inline bool pm_runtime_callbacks_present(struct device *dev)
92378
92379 static inline void pm_runtime_mark_last_busy(struct device *dev)
92380 {
92381- ACCESS_ONCE(dev->power.last_busy) = jiffies;
92382+ ACCESS_ONCE_RW(dev->power.last_busy) = jiffies;
92383 }
92384
92385 static inline bool pm_runtime_is_irq_safe(struct device *dev)
92386diff --git a/include/linux/pnp.h b/include/linux/pnp.h
92387index 6512e9c..ec27fa2 100644
92388--- a/include/linux/pnp.h
92389+++ b/include/linux/pnp.h
92390@@ -298,7 +298,7 @@ static inline void pnp_set_drvdata(struct pnp_dev *pdev, void *data)
92391 struct pnp_fixup {
92392 char id[7];
92393 void (*quirk_function) (struct pnp_dev * dev); /* fixup function */
92394-};
92395+} __do_const;
92396
92397 /* config parameters */
92398 #define PNP_CONFIG_NORMAL 0x0001
92399diff --git a/include/linux/poison.h b/include/linux/poison.h
92400index 2110a81..13a11bb 100644
92401--- a/include/linux/poison.h
92402+++ b/include/linux/poison.h
92403@@ -19,8 +19,8 @@
92404 * under normal circumstances, used to verify that nobody uses
92405 * non-initialized list entries.
92406 */
92407-#define LIST_POISON1 ((void *) 0x00100100 + POISON_POINTER_DELTA)
92408-#define LIST_POISON2 ((void *) 0x00200200 + POISON_POINTER_DELTA)
92409+#define LIST_POISON1 ((void *) (long)0xFFFFFF01)
92410+#define LIST_POISON2 ((void *) (long)0xFFFFFF02)
92411
92412 /********** include/linux/timer.h **********/
92413 /*
92414diff --git a/include/linux/power/smartreflex.h b/include/linux/power/smartreflex.h
92415index d8b187c3..9a9257a 100644
92416--- a/include/linux/power/smartreflex.h
92417+++ b/include/linux/power/smartreflex.h
92418@@ -238,7 +238,7 @@ struct omap_sr_class_data {
92419 int (*notify)(struct omap_sr *sr, u32 status);
92420 u8 notify_flags;
92421 u8 class_type;
92422-};
92423+} __do_const;
92424
92425 /**
92426 * struct omap_sr_nvalue_table - Smartreflex n-target value info
92427diff --git a/include/linux/ppp-comp.h b/include/linux/ppp-comp.h
92428index 4ea1d37..80f4b33 100644
92429--- a/include/linux/ppp-comp.h
92430+++ b/include/linux/ppp-comp.h
92431@@ -84,7 +84,7 @@ struct compressor {
92432 struct module *owner;
92433 /* Extra skb space needed by the compressor algorithm */
92434 unsigned int comp_extra;
92435-};
92436+} __do_const;
92437
92438 /*
92439 * The return value from decompress routine is the length of the
92440diff --git a/include/linux/preempt.h b/include/linux/preempt.h
92441index de83b4e..c4b997d 100644
92442--- a/include/linux/preempt.h
92443+++ b/include/linux/preempt.h
92444@@ -27,11 +27,16 @@ extern void preempt_count_sub(int val);
92445 #define preempt_count_dec_and_test() __preempt_count_dec_and_test()
92446 #endif
92447
92448+#define raw_preempt_count_add(val) __preempt_count_add(val)
92449+#define raw_preempt_count_sub(val) __preempt_count_sub(val)
92450+
92451 #define __preempt_count_inc() __preempt_count_add(1)
92452 #define __preempt_count_dec() __preempt_count_sub(1)
92453
92454 #define preempt_count_inc() preempt_count_add(1)
92455+#define raw_preempt_count_inc() raw_preempt_count_add(1)
92456 #define preempt_count_dec() preempt_count_sub(1)
92457+#define raw_preempt_count_dec() raw_preempt_count_sub(1)
92458
92459 #ifdef CONFIG_PREEMPT_COUNT
92460
92461@@ -41,6 +46,12 @@ do { \
92462 barrier(); \
92463 } while (0)
92464
92465+#define raw_preempt_disable() \
92466+do { \
92467+ raw_preempt_count_inc(); \
92468+ barrier(); \
92469+} while (0)
92470+
92471 #define sched_preempt_enable_no_resched() \
92472 do { \
92473 barrier(); \
92474@@ -49,6 +60,12 @@ do { \
92475
92476 #define preempt_enable_no_resched() sched_preempt_enable_no_resched()
92477
92478+#define raw_preempt_enable_no_resched() \
92479+do { \
92480+ barrier(); \
92481+ raw_preempt_count_dec(); \
92482+} while (0)
92483+
92484 #ifdef CONFIG_PREEMPT
92485 #define preempt_enable() \
92486 do { \
92487@@ -113,8 +130,10 @@ do { \
92488 * region.
92489 */
92490 #define preempt_disable() barrier()
92491+#define raw_preempt_disable() barrier()
92492 #define sched_preempt_enable_no_resched() barrier()
92493 #define preempt_enable_no_resched() barrier()
92494+#define raw_preempt_enable_no_resched() barrier()
92495 #define preempt_enable() barrier()
92496 #define preempt_check_resched() do { } while (0)
92497
92498@@ -128,11 +147,13 @@ do { \
92499 /*
92500 * Modules have no business playing preemption tricks.
92501 */
92502+#ifndef CONFIG_PAX_KERNEXEC
92503 #undef sched_preempt_enable_no_resched
92504 #undef preempt_enable_no_resched
92505 #undef preempt_enable_no_resched_notrace
92506 #undef preempt_check_resched
92507 #endif
92508+#endif
92509
92510 #define preempt_set_need_resched() \
92511 do { \
92512diff --git a/include/linux/printk.h b/include/linux/printk.h
92513index baa3f97..168cff1 100644
92514--- a/include/linux/printk.h
92515+++ b/include/linux/printk.h
92516@@ -121,6 +121,7 @@ void early_printk(const char *s, ...) { }
92517 #endif
92518
92519 typedef int(*printk_func_t)(const char *fmt, va_list args);
92520+extern int kptr_restrict;
92521
92522 #ifdef CONFIG_PRINTK
92523 asmlinkage __printf(5, 0)
92524@@ -156,7 +157,6 @@ extern bool printk_timed_ratelimit(unsigned long *caller_jiffies,
92525
92526 extern int printk_delay_msec;
92527 extern int dmesg_restrict;
92528-extern int kptr_restrict;
92529
92530 extern void wake_up_klogd(void);
92531
92532diff --git a/include/linux/proc_fs.h b/include/linux/proc_fs.h
92533index b97bf2e..f14c92d4 100644
92534--- a/include/linux/proc_fs.h
92535+++ b/include/linux/proc_fs.h
92536@@ -17,8 +17,11 @@ extern void proc_flush_task(struct task_struct *);
92537 extern struct proc_dir_entry *proc_symlink(const char *,
92538 struct proc_dir_entry *, const char *);
92539 extern struct proc_dir_entry *proc_mkdir(const char *, struct proc_dir_entry *);
92540+extern struct proc_dir_entry *proc_mkdir_restrict(const char *, struct proc_dir_entry *);
92541 extern struct proc_dir_entry *proc_mkdir_data(const char *, umode_t,
92542 struct proc_dir_entry *, void *);
92543+extern struct proc_dir_entry *proc_mkdir_data_restrict(const char *, umode_t,
92544+ struct proc_dir_entry *, void *);
92545 extern struct proc_dir_entry *proc_mkdir_mode(const char *, umode_t,
92546 struct proc_dir_entry *);
92547
92548@@ -34,6 +37,19 @@ static inline struct proc_dir_entry *proc_create(
92549 return proc_create_data(name, mode, parent, proc_fops, NULL);
92550 }
92551
92552+static inline struct proc_dir_entry *proc_create_grsec(const char *name, umode_t mode,
92553+ struct proc_dir_entry *parent, const struct file_operations *proc_fops)
92554+{
92555+#ifdef CONFIG_GRKERNSEC_PROC_USER
92556+ return proc_create_data(name, S_IRUSR, parent, proc_fops, NULL);
92557+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
92558+ return proc_create_data(name, S_IRUSR | S_IRGRP, parent, proc_fops, NULL);
92559+#else
92560+ return proc_create_data(name, mode, parent, proc_fops, NULL);
92561+#endif
92562+}
92563+
92564+
92565 extern void proc_set_size(struct proc_dir_entry *, loff_t);
92566 extern void proc_set_user(struct proc_dir_entry *, kuid_t, kgid_t);
92567 extern void *PDE_DATA(const struct inode *);
92568@@ -56,8 +72,12 @@ static inline struct proc_dir_entry *proc_symlink(const char *name,
92569 struct proc_dir_entry *parent,const char *dest) { return NULL;}
92570 static inline struct proc_dir_entry *proc_mkdir(const char *name,
92571 struct proc_dir_entry *parent) {return NULL;}
92572+static inline struct proc_dir_entry *proc_mkdir_restrict(const char *name,
92573+ struct proc_dir_entry *parent) { return NULL; }
92574 static inline struct proc_dir_entry *proc_mkdir_data(const char *name,
92575 umode_t mode, struct proc_dir_entry *parent, void *data) { return NULL; }
92576+static inline struct proc_dir_entry *proc_mkdir_data_restrict(const char *name,
92577+ umode_t mode, struct proc_dir_entry *parent, void *data) { return NULL; }
92578 static inline struct proc_dir_entry *proc_mkdir_mode(const char *name,
92579 umode_t mode, struct proc_dir_entry *parent) { return NULL; }
92580 #define proc_create(name, mode, parent, proc_fops) ({NULL;})
92581@@ -79,7 +99,7 @@ struct net;
92582 static inline struct proc_dir_entry *proc_net_mkdir(
92583 struct net *net, const char *name, struct proc_dir_entry *parent)
92584 {
92585- return proc_mkdir_data(name, 0, parent, net);
92586+ return proc_mkdir_data_restrict(name, 0, parent, net);
92587 }
92588
92589 #endif /* _LINUX_PROC_FS_H */
92590diff --git a/include/linux/proc_ns.h b/include/linux/proc_ns.h
92591index 42dfc61..8113a99 100644
92592--- a/include/linux/proc_ns.h
92593+++ b/include/linux/proc_ns.h
92594@@ -16,7 +16,7 @@ struct proc_ns_operations {
92595 struct ns_common *(*get)(struct task_struct *task);
92596 void (*put)(struct ns_common *ns);
92597 int (*install)(struct nsproxy *nsproxy, struct ns_common *ns);
92598-};
92599+} __do_const __randomize_layout;
92600
92601 extern const struct proc_ns_operations netns_operations;
92602 extern const struct proc_ns_operations utsns_operations;
92603diff --git a/include/linux/quota.h b/include/linux/quota.h
92604index d534e8e..782e604 100644
92605--- a/include/linux/quota.h
92606+++ b/include/linux/quota.h
92607@@ -75,7 +75,7 @@ struct kqid { /* Type in which we store the quota identifier */
92608
92609 extern bool qid_eq(struct kqid left, struct kqid right);
92610 extern bool qid_lt(struct kqid left, struct kqid right);
92611-extern qid_t from_kqid(struct user_namespace *to, struct kqid qid);
92612+extern qid_t from_kqid(struct user_namespace *to, struct kqid qid) __intentional_overflow(-1);
92613 extern qid_t from_kqid_munged(struct user_namespace *to, struct kqid qid);
92614 extern bool qid_valid(struct kqid qid);
92615
92616diff --git a/include/linux/random.h b/include/linux/random.h
92617index b05856e..0a9f14e 100644
92618--- a/include/linux/random.h
92619+++ b/include/linux/random.h
92620@@ -9,9 +9,19 @@
92621 #include <uapi/linux/random.h>
92622
92623 extern void add_device_randomness(const void *, unsigned int);
92624+
92625+static inline void add_latent_entropy(void)
92626+{
92627+
92628+#ifdef LATENT_ENTROPY_PLUGIN
92629+ add_device_randomness((const void *)&latent_entropy, sizeof(latent_entropy));
92630+#endif
92631+
92632+}
92633+
92634 extern void add_input_randomness(unsigned int type, unsigned int code,
92635- unsigned int value);
92636-extern void add_interrupt_randomness(int irq, int irq_flags);
92637+ unsigned int value) __latent_entropy;
92638+extern void add_interrupt_randomness(int irq, int irq_flags) __latent_entropy;
92639
92640 extern void get_random_bytes(void *buf, int nbytes);
92641 extern void get_random_bytes_arch(void *buf, int nbytes);
92642@@ -22,10 +32,10 @@ extern int random_int_secret_init(void);
92643 extern const struct file_operations random_fops, urandom_fops;
92644 #endif
92645
92646-unsigned int get_random_int(void);
92647+unsigned int __intentional_overflow(-1) get_random_int(void);
92648 unsigned long randomize_range(unsigned long start, unsigned long end, unsigned long len);
92649
92650-u32 prandom_u32(void);
92651+u32 prandom_u32(void) __intentional_overflow(-1);
92652 void prandom_bytes(void *buf, size_t nbytes);
92653 void prandom_seed(u32 seed);
92654 void prandom_reseed_late(void);
92655@@ -37,6 +47,11 @@ struct rnd_state {
92656 u32 prandom_u32_state(struct rnd_state *state);
92657 void prandom_bytes_state(struct rnd_state *state, void *buf, size_t nbytes);
92658
92659+static inline unsigned long __intentional_overflow(-1) pax_get_random_long(void)
92660+{
92661+ return prandom_u32() + (sizeof(long) > 4 ? (unsigned long)prandom_u32() << 32 : 0);
92662+}
92663+
92664 /**
92665 * prandom_u32_max - returns a pseudo-random number in interval [0, ep_ro)
92666 * @ep_ro: right open interval endpoint
92667@@ -49,7 +64,7 @@ void prandom_bytes_state(struct rnd_state *state, void *buf, size_t nbytes);
92668 *
92669 * Returns: pseudo-random number in interval [0, ep_ro)
92670 */
92671-static inline u32 prandom_u32_max(u32 ep_ro)
92672+static inline u32 __intentional_overflow(-1) prandom_u32_max(u32 ep_ro)
92673 {
92674 return (u32)(((u64) prandom_u32() * ep_ro) >> 32);
92675 }
92676diff --git a/include/linux/rbtree_augmented.h b/include/linux/rbtree_augmented.h
92677index 378c5ee..aa84a47 100644
92678--- a/include/linux/rbtree_augmented.h
92679+++ b/include/linux/rbtree_augmented.h
92680@@ -90,7 +90,9 @@ rbname ## _rotate(struct rb_node *rb_old, struct rb_node *rb_new) \
92681 old->rbaugmented = rbcompute(old); \
92682 } \
92683 rbstatic const struct rb_augment_callbacks rbname = { \
92684- rbname ## _propagate, rbname ## _copy, rbname ## _rotate \
92685+ .propagate = rbname ## _propagate, \
92686+ .copy = rbname ## _copy, \
92687+ .rotate = rbname ## _rotate \
92688 };
92689
92690
92691diff --git a/include/linux/rculist.h b/include/linux/rculist.h
92692index a18b16f..2683096 100644
92693--- a/include/linux/rculist.h
92694+++ b/include/linux/rculist.h
92695@@ -29,8 +29,8 @@
92696 */
92697 static inline void INIT_LIST_HEAD_RCU(struct list_head *list)
92698 {
92699- ACCESS_ONCE(list->next) = list;
92700- ACCESS_ONCE(list->prev) = list;
92701+ ACCESS_ONCE_RW(list->next) = list;
92702+ ACCESS_ONCE_RW(list->prev) = list;
92703 }
92704
92705 /*
92706@@ -59,6 +59,9 @@ void __list_add_rcu(struct list_head *new,
92707 struct list_head *prev, struct list_head *next);
92708 #endif
92709
92710+void __pax_list_add_rcu(struct list_head *new,
92711+ struct list_head *prev, struct list_head *next);
92712+
92713 /**
92714 * list_add_rcu - add a new entry to rcu-protected list
92715 * @new: new entry to be added
92716@@ -80,6 +83,11 @@ static inline void list_add_rcu(struct list_head *new, struct list_head *head)
92717 __list_add_rcu(new, head, head->next);
92718 }
92719
92720+static inline void pax_list_add_rcu(struct list_head *new, struct list_head *head)
92721+{
92722+ __pax_list_add_rcu(new, head, head->next);
92723+}
92724+
92725 /**
92726 * list_add_tail_rcu - add a new entry to rcu-protected list
92727 * @new: new entry to be added
92728@@ -102,6 +110,12 @@ static inline void list_add_tail_rcu(struct list_head *new,
92729 __list_add_rcu(new, head->prev, head);
92730 }
92731
92732+static inline void pax_list_add_tail_rcu(struct list_head *new,
92733+ struct list_head *head)
92734+{
92735+ __pax_list_add_rcu(new, head->prev, head);
92736+}
92737+
92738 /**
92739 * list_del_rcu - deletes entry from list without re-initialization
92740 * @entry: the element to delete from the list.
92741@@ -132,6 +146,8 @@ static inline void list_del_rcu(struct list_head *entry)
92742 entry->prev = LIST_POISON2;
92743 }
92744
92745+extern void pax_list_del_rcu(struct list_head *entry);
92746+
92747 /**
92748 * hlist_del_init_rcu - deletes entry from hash list with re-initialization
92749 * @n: the element to delete from the hash list.
92750diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
92751index 7809749..1cd9315 100644
92752--- a/include/linux/rcupdate.h
92753+++ b/include/linux/rcupdate.h
92754@@ -333,7 +333,7 @@ extern struct srcu_struct tasks_rcu_exit_srcu;
92755 do { \
92756 rcu_all_qs(); \
92757 if (ACCESS_ONCE((t)->rcu_tasks_holdout)) \
92758- ACCESS_ONCE((t)->rcu_tasks_holdout) = false; \
92759+ ACCESS_ONCE_RW((t)->rcu_tasks_holdout) = false; \
92760 } while (0)
92761 #else /* #ifdef CONFIG_TASKS_RCU */
92762 #define TASKS_RCU(x) do { } while (0)
92763diff --git a/include/linux/reboot.h b/include/linux/reboot.h
92764index 67fc8fc..a90f7d8 100644
92765--- a/include/linux/reboot.h
92766+++ b/include/linux/reboot.h
92767@@ -47,9 +47,9 @@ extern void do_kernel_restart(char *cmd);
92768 */
92769
92770 extern void migrate_to_reboot_cpu(void);
92771-extern void machine_restart(char *cmd);
92772-extern void machine_halt(void);
92773-extern void machine_power_off(void);
92774+extern void machine_restart(char *cmd) __noreturn;
92775+extern void machine_halt(void) __noreturn;
92776+extern void machine_power_off(void) __noreturn;
92777
92778 extern void machine_shutdown(void);
92779 struct pt_regs;
92780@@ -60,9 +60,9 @@ extern void machine_crash_shutdown(struct pt_regs *);
92781 */
92782
92783 extern void kernel_restart_prepare(char *cmd);
92784-extern void kernel_restart(char *cmd);
92785-extern void kernel_halt(void);
92786-extern void kernel_power_off(void);
92787+extern void kernel_restart(char *cmd) __noreturn;
92788+extern void kernel_halt(void) __noreturn;
92789+extern void kernel_power_off(void) __noreturn;
92790
92791 extern int C_A_D; /* for sysctl */
92792 void ctrl_alt_del(void);
92793@@ -76,7 +76,7 @@ extern int orderly_poweroff(bool force);
92794 * Emergency restart, callable from an interrupt handler.
92795 */
92796
92797-extern void emergency_restart(void);
92798+extern void emergency_restart(void) __noreturn;
92799 #include <asm/emergency-restart.h>
92800
92801 #endif /* _LINUX_REBOOT_H */
92802diff --git a/include/linux/regset.h b/include/linux/regset.h
92803index 8e0c9fe..ac4d221 100644
92804--- a/include/linux/regset.h
92805+++ b/include/linux/regset.h
92806@@ -161,7 +161,8 @@ struct user_regset {
92807 unsigned int align;
92808 unsigned int bias;
92809 unsigned int core_note_type;
92810-};
92811+} __do_const;
92812+typedef struct user_regset __no_const user_regset_no_const;
92813
92814 /**
92815 * struct user_regset_view - available regsets
92816diff --git a/include/linux/relay.h b/include/linux/relay.h
92817index d7c8359..818daf5 100644
92818--- a/include/linux/relay.h
92819+++ b/include/linux/relay.h
92820@@ -157,7 +157,7 @@ struct rchan_callbacks
92821 * The callback should return 0 if successful, negative if not.
92822 */
92823 int (*remove_buf_file)(struct dentry *dentry);
92824-};
92825+} __no_const;
92826
92827 /*
92828 * CONFIG_RELAY kernel API, kernel/relay.c
92829diff --git a/include/linux/rio.h b/include/linux/rio.h
92830index 6bda06f..bf39a9b 100644
92831--- a/include/linux/rio.h
92832+++ b/include/linux/rio.h
92833@@ -358,7 +358,7 @@ struct rio_ops {
92834 int (*map_inb)(struct rio_mport *mport, dma_addr_t lstart,
92835 u64 rstart, u32 size, u32 flags);
92836 void (*unmap_inb)(struct rio_mport *mport, dma_addr_t lstart);
92837-};
92838+} __no_const;
92839
92840 #define RIO_RESOURCE_MEM 0x00000100
92841 #define RIO_RESOURCE_DOORBELL 0x00000200
92842diff --git a/include/linux/rmap.h b/include/linux/rmap.h
92843index c4c559a..6ba9a26 100644
92844--- a/include/linux/rmap.h
92845+++ b/include/linux/rmap.h
92846@@ -154,8 +154,8 @@ static inline void anon_vma_unlock_read(struct anon_vma *anon_vma)
92847 void anon_vma_init(void); /* create anon_vma_cachep */
92848 int anon_vma_prepare(struct vm_area_struct *);
92849 void unlink_anon_vmas(struct vm_area_struct *);
92850-int anon_vma_clone(struct vm_area_struct *, struct vm_area_struct *);
92851-int anon_vma_fork(struct vm_area_struct *, struct vm_area_struct *);
92852+int anon_vma_clone(struct vm_area_struct *, const struct vm_area_struct *);
92853+int anon_vma_fork(struct vm_area_struct *, const struct vm_area_struct *);
92854
92855 static inline void anon_vma_merge(struct vm_area_struct *vma,
92856 struct vm_area_struct *next)
92857diff --git a/include/linux/scatterlist.h b/include/linux/scatterlist.h
92858index ed8f9e70..999bc96 100644
92859--- a/include/linux/scatterlist.h
92860+++ b/include/linux/scatterlist.h
92861@@ -1,6 +1,7 @@
92862 #ifndef _LINUX_SCATTERLIST_H
92863 #define _LINUX_SCATTERLIST_H
92864
92865+#include <linux/sched.h>
92866 #include <linux/string.h>
92867 #include <linux/bug.h>
92868 #include <linux/mm.h>
92869@@ -114,6 +115,12 @@ static inline void sg_set_buf(struct scatterlist *sg, const void *buf,
92870 #ifdef CONFIG_DEBUG_SG
92871 BUG_ON(!virt_addr_valid(buf));
92872 #endif
92873+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
92874+ if (object_starts_on_stack(buf)) {
92875+ void *adjbuf = buf - current->stack + current->lowmem_stack;
92876+ sg_set_page(sg, virt_to_page(adjbuf), buflen, offset_in_page(adjbuf));
92877+ } else
92878+#endif
92879 sg_set_page(sg, virt_to_page(buf), buflen, offset_in_page(buf));
92880 }
92881
92882diff --git a/include/linux/sched.h b/include/linux/sched.h
92883index 51348f7..8c8b0ba 100644
92884--- a/include/linux/sched.h
92885+++ b/include/linux/sched.h
92886@@ -133,6 +133,7 @@ struct fs_struct;
92887 struct perf_event_context;
92888 struct blk_plug;
92889 struct filename;
92890+struct linux_binprm;
92891
92892 #define VMACACHE_BITS 2
92893 #define VMACACHE_SIZE (1U << VMACACHE_BITS)
92894@@ -420,7 +421,7 @@ extern char __sched_text_start[], __sched_text_end[];
92895 extern int in_sched_functions(unsigned long addr);
92896
92897 #define MAX_SCHEDULE_TIMEOUT LONG_MAX
92898-extern signed long schedule_timeout(signed long timeout);
92899+extern signed long schedule_timeout(signed long timeout) __intentional_overflow(-1);
92900 extern signed long schedule_timeout_interruptible(signed long timeout);
92901 extern signed long schedule_timeout_killable(signed long timeout);
92902 extern signed long schedule_timeout_uninterruptible(signed long timeout);
92903@@ -438,6 +439,19 @@ struct nsproxy;
92904 struct user_namespace;
92905
92906 #ifdef CONFIG_MMU
92907+
92908+#ifdef CONFIG_GRKERNSEC_RAND_THREADSTACK
92909+extern unsigned long gr_rand_threadstack_offset(const struct mm_struct *mm, const struct file *filp, unsigned long flags);
92910+#else
92911+static inline unsigned long gr_rand_threadstack_offset(const struct mm_struct *mm, const struct file *filp, unsigned long flags)
92912+{
92913+ return 0;
92914+}
92915+#endif
92916+
92917+extern bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len, unsigned long offset);
92918+extern unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len, unsigned long offset);
92919+
92920 extern void arch_pick_mmap_layout(struct mm_struct *mm);
92921 extern unsigned long
92922 arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
92923@@ -736,6 +750,17 @@ struct signal_struct {
92924 #ifdef CONFIG_TASKSTATS
92925 struct taskstats *stats;
92926 #endif
92927+
92928+#ifdef CONFIG_GRKERNSEC
92929+ u32 curr_ip;
92930+ u32 saved_ip;
92931+ u32 gr_saddr;
92932+ u32 gr_daddr;
92933+ u16 gr_sport;
92934+ u16 gr_dport;
92935+ u8 used_accept:1;
92936+#endif
92937+
92938 #ifdef CONFIG_AUDIT
92939 unsigned audit_tty;
92940 unsigned audit_tty_log_passwd;
92941@@ -762,7 +787,7 @@ struct signal_struct {
92942 struct mutex cred_guard_mutex; /* guard against foreign influences on
92943 * credential calculations
92944 * (notably. ptrace) */
92945-};
92946+} __randomize_layout;
92947
92948 /*
92949 * Bits in flags field of signal_struct.
92950@@ -815,6 +840,14 @@ struct user_struct {
92951 struct key *session_keyring; /* UID's default session keyring */
92952 #endif
92953
92954+#ifdef CONFIG_GRKERNSEC_KERN_LOCKOUT
92955+ unsigned char kernel_banned;
92956+#endif
92957+#ifdef CONFIG_GRKERNSEC_BRUTE
92958+ unsigned char suid_banned;
92959+ unsigned long suid_ban_expires;
92960+#endif
92961+
92962 /* Hash table maintenance information */
92963 struct hlist_node uidhash_node;
92964 kuid_t uid;
92965@@ -822,7 +855,7 @@ struct user_struct {
92966 #ifdef CONFIG_PERF_EVENTS
92967 atomic_long_t locked_vm;
92968 #endif
92969-};
92970+} __randomize_layout;
92971
92972 extern int uids_sysfs_init(void);
92973
92974@@ -1286,6 +1319,9 @@ enum perf_event_task_context {
92975 struct task_struct {
92976 volatile long state; /* -1 unrunnable, 0 runnable, >0 stopped */
92977 void *stack;
92978+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
92979+ void *lowmem_stack;
92980+#endif
92981 atomic_t usage;
92982 unsigned int flags; /* per process flags, defined below */
92983 unsigned int ptrace;
92984@@ -1419,8 +1455,8 @@ struct task_struct {
92985 struct list_head thread_node;
92986
92987 struct completion *vfork_done; /* for vfork() */
92988- int __user *set_child_tid; /* CLONE_CHILD_SETTID */
92989- int __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
92990+ pid_t __user *set_child_tid; /* CLONE_CHILD_SETTID */
92991+ pid_t __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
92992
92993 cputime_t utime, stime, utimescaled, stimescaled;
92994 cputime_t gtime;
92995@@ -1445,11 +1481,6 @@ struct task_struct {
92996 struct task_cputime cputime_expires;
92997 struct list_head cpu_timers[3];
92998
92999-/* process credentials */
93000- const struct cred __rcu *real_cred; /* objective and real subjective task
93001- * credentials (COW) */
93002- const struct cred __rcu *cred; /* effective (overridable) subjective task
93003- * credentials (COW) */
93004 char comm[TASK_COMM_LEN]; /* executable name excluding path
93005 - access with [gs]et_task_comm (which lock
93006 it with task_lock())
93007@@ -1467,6 +1498,10 @@ struct task_struct {
93008 #endif
93009 /* CPU-specific state of this task */
93010 struct thread_struct thread;
93011+/* thread_info moved to task_struct */
93012+#ifdef CONFIG_X86
93013+ struct thread_info tinfo;
93014+#endif
93015 /* filesystem information */
93016 struct fs_struct *fs;
93017 /* open file information */
93018@@ -1541,6 +1576,10 @@ struct task_struct {
93019 gfp_t lockdep_reclaim_gfp;
93020 #endif
93021
93022+/* process credentials */
93023+ const struct cred __rcu *real_cred; /* objective and real subjective task
93024+ * credentials (COW) */
93025+
93026 /* journalling filesystem info */
93027 void *journal_info;
93028
93029@@ -1579,6 +1618,10 @@ struct task_struct {
93030 /* cg_list protected by css_set_lock and tsk->alloc_lock */
93031 struct list_head cg_list;
93032 #endif
93033+
93034+ const struct cred __rcu *cred; /* effective (overridable) subjective task
93035+ * credentials (COW) */
93036+
93037 #ifdef CONFIG_FUTEX
93038 struct robust_list_head __user *robust_list;
93039 #ifdef CONFIG_COMPAT
93040@@ -1690,7 +1733,7 @@ struct task_struct {
93041 * Number of functions that haven't been traced
93042 * because of depth overrun.
93043 */
93044- atomic_t trace_overrun;
93045+ atomic_unchecked_t trace_overrun;
93046 /* Pause for the tracing */
93047 atomic_t tracing_graph_pause;
93048 #endif
93049@@ -1718,7 +1761,78 @@ struct task_struct {
93050 #ifdef CONFIG_DEBUG_ATOMIC_SLEEP
93051 unsigned long task_state_change;
93052 #endif
93053-};
93054+
93055+#ifdef CONFIG_GRKERNSEC
93056+ /* grsecurity */
93057+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
93058+ u64 exec_id;
93059+#endif
93060+#ifdef CONFIG_GRKERNSEC_SETXID
93061+ const struct cred *delayed_cred;
93062+#endif
93063+ struct dentry *gr_chroot_dentry;
93064+ struct acl_subject_label *acl;
93065+ struct acl_subject_label *tmpacl;
93066+ struct acl_role_label *role;
93067+ struct file *exec_file;
93068+ unsigned long brute_expires;
93069+ u16 acl_role_id;
93070+ u8 inherited;
93071+ /* is this the task that authenticated to the special role */
93072+ u8 acl_sp_role;
93073+ u8 is_writable;
93074+ u8 brute;
93075+ u8 gr_is_chrooted;
93076+#endif
93077+
93078+} __randomize_layout;
93079+
93080+#define MF_PAX_PAGEEXEC 0x01000000 /* Paging based non-executable pages */
93081+#define MF_PAX_EMUTRAMP 0x02000000 /* Emulate trampolines */
93082+#define MF_PAX_MPROTECT 0x04000000 /* Restrict mprotect() */
93083+#define MF_PAX_RANDMMAP 0x08000000 /* Randomize mmap() base */
93084+/*#define MF_PAX_RANDEXEC 0x10000000*/ /* Randomize ET_EXEC base */
93085+#define MF_PAX_SEGMEXEC 0x20000000 /* Segmentation based non-executable pages */
93086+
93087+#ifdef CONFIG_PAX_SOFTMODE
93088+extern int pax_softmode;
93089+#endif
93090+
93091+extern int pax_check_flags(unsigned long *);
93092+#define PAX_PARSE_FLAGS_FALLBACK (~0UL)
93093+
93094+/* if tsk != current then task_lock must be held on it */
93095+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
93096+static inline unsigned long pax_get_flags(struct task_struct *tsk)
93097+{
93098+ if (likely(tsk->mm))
93099+ return tsk->mm->pax_flags;
93100+ else
93101+ return 0UL;
93102+}
93103+
93104+/* if tsk != current then task_lock must be held on it */
93105+static inline long pax_set_flags(struct task_struct *tsk, unsigned long flags)
93106+{
93107+ if (likely(tsk->mm)) {
93108+ tsk->mm->pax_flags = flags;
93109+ return 0;
93110+ }
93111+ return -EINVAL;
93112+}
93113+#endif
93114+
93115+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
93116+extern void pax_set_initial_flags(struct linux_binprm *bprm);
93117+#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
93118+extern void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
93119+#endif
93120+
93121+struct path;
93122+extern char *pax_get_path(const struct path *path, char *buf, int buflen);
93123+extern void pax_report_fault(struct pt_regs *regs, void *pc, void *sp);
93124+extern void pax_report_insns(struct pt_regs *regs, void *pc, void *sp);
93125+extern void pax_report_refcount_overflow(struct pt_regs *regs);
93126
93127 /* Future-safe accessor for struct task_struct's cpus_allowed. */
93128 #define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed)
93129@@ -1801,7 +1915,7 @@ struct pid_namespace;
93130 pid_t __task_pid_nr_ns(struct task_struct *task, enum pid_type type,
93131 struct pid_namespace *ns);
93132
93133-static inline pid_t task_pid_nr(struct task_struct *tsk)
93134+static inline pid_t task_pid_nr(const struct task_struct *tsk)
93135 {
93136 return tsk->pid;
93137 }
93138@@ -2169,6 +2283,25 @@ extern u64 sched_clock_cpu(int cpu);
93139
93140 extern void sched_clock_init(void);
93141
93142+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
93143+static inline void populate_stack(void)
93144+{
93145+ struct task_struct *curtask = current;
93146+ int c;
93147+ int *ptr = curtask->stack;
93148+ int *end = curtask->stack + THREAD_SIZE;
93149+
93150+ while (ptr < end) {
93151+ c = *(volatile int *)ptr;
93152+ ptr += PAGE_SIZE/sizeof(int);
93153+ }
93154+}
93155+#else
93156+static inline void populate_stack(void)
93157+{
93158+}
93159+#endif
93160+
93161 #ifndef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
93162 static inline void sched_clock_tick(void)
93163 {
93164@@ -2302,7 +2435,9 @@ void yield(void);
93165 extern struct exec_domain default_exec_domain;
93166
93167 union thread_union {
93168+#ifndef CONFIG_X86
93169 struct thread_info thread_info;
93170+#endif
93171 unsigned long stack[THREAD_SIZE/sizeof(long)];
93172 };
93173
93174@@ -2335,6 +2470,7 @@ extern struct pid_namespace init_pid_ns;
93175 */
93176
93177 extern struct task_struct *find_task_by_vpid(pid_t nr);
93178+extern struct task_struct *find_task_by_vpid_unrestricted(pid_t nr);
93179 extern struct task_struct *find_task_by_pid_ns(pid_t nr,
93180 struct pid_namespace *ns);
93181
93182@@ -2499,7 +2635,7 @@ extern void __cleanup_sighand(struct sighand_struct *);
93183 extern void exit_itimers(struct signal_struct *);
93184 extern void flush_itimer_signals(void);
93185
93186-extern void do_group_exit(int);
93187+extern __noreturn void do_group_exit(int);
93188
93189 extern int do_execve(struct filename *,
93190 const char __user * const __user *,
93191@@ -2720,9 +2856,9 @@ static inline unsigned long *end_of_stack(struct task_struct *p)
93192 #define task_stack_end_corrupted(task) \
93193 (*(end_of_stack(task)) != STACK_END_MAGIC)
93194
93195-static inline int object_is_on_stack(void *obj)
93196+static inline int object_starts_on_stack(const void *obj)
93197 {
93198- void *stack = task_stack_page(current);
93199+ const void *stack = task_stack_page(current);
93200
93201 return (obj >= stack) && (obj < (stack + THREAD_SIZE));
93202 }
93203diff --git a/include/linux/sched/sysctl.h b/include/linux/sched/sysctl.h
93204index 596a0e0..bea77ec 100644
93205--- a/include/linux/sched/sysctl.h
93206+++ b/include/linux/sched/sysctl.h
93207@@ -34,6 +34,7 @@ enum { sysctl_hung_task_timeout_secs = 0 };
93208 #define DEFAULT_MAX_MAP_COUNT (USHRT_MAX - MAPCOUNT_ELF_CORE_MARGIN)
93209
93210 extern int sysctl_max_map_count;
93211+extern unsigned long sysctl_heap_stack_gap;
93212
93213 extern unsigned int sysctl_sched_latency;
93214 extern unsigned int sysctl_sched_min_granularity;
93215diff --git a/include/linux/security.h b/include/linux/security.h
93216index a1b7dbd..036f47f 100644
93217--- a/include/linux/security.h
93218+++ b/include/linux/security.h
93219@@ -27,6 +27,7 @@
93220 #include <linux/slab.h>
93221 #include <linux/err.h>
93222 #include <linux/string.h>
93223+#include <linux/grsecurity.h>
93224
93225 struct linux_binprm;
93226 struct cred;
93227@@ -116,8 +117,6 @@ struct seq_file;
93228
93229 extern int cap_netlink_send(struct sock *sk, struct sk_buff *skb);
93230
93231-void reset_security_ops(void);
93232-
93233 #ifdef CONFIG_MMU
93234 extern unsigned long mmap_min_addr;
93235 extern unsigned long dac_mmap_min_addr;
93236@@ -1756,7 +1755,7 @@ struct security_operations {
93237 struct audit_context *actx);
93238 void (*audit_rule_free) (void *lsmrule);
93239 #endif /* CONFIG_AUDIT */
93240-};
93241+} __randomize_layout;
93242
93243 /* prototypes */
93244 extern int security_init(void);
93245diff --git a/include/linux/semaphore.h b/include/linux/semaphore.h
93246index dc368b8..e895209 100644
93247--- a/include/linux/semaphore.h
93248+++ b/include/linux/semaphore.h
93249@@ -37,7 +37,7 @@ static inline void sema_init(struct semaphore *sem, int val)
93250 }
93251
93252 extern void down(struct semaphore *sem);
93253-extern int __must_check down_interruptible(struct semaphore *sem);
93254+extern int __must_check down_interruptible(struct semaphore *sem) __intentional_overflow(-1);
93255 extern int __must_check down_killable(struct semaphore *sem);
93256 extern int __must_check down_trylock(struct semaphore *sem);
93257 extern int __must_check down_timeout(struct semaphore *sem, long jiffies);
93258diff --git a/include/linux/seq_file.h b/include/linux/seq_file.h
93259index afbb1fd..e1d205d 100644
93260--- a/include/linux/seq_file.h
93261+++ b/include/linux/seq_file.h
93262@@ -27,6 +27,9 @@ struct seq_file {
93263 struct mutex lock;
93264 const struct seq_operations *op;
93265 int poll_event;
93266+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
93267+ u64 exec_id;
93268+#endif
93269 #ifdef CONFIG_USER_NS
93270 struct user_namespace *user_ns;
93271 #endif
93272@@ -39,6 +42,7 @@ struct seq_operations {
93273 void * (*next) (struct seq_file *m, void *v, loff_t *pos);
93274 int (*show) (struct seq_file *m, void *v);
93275 };
93276+typedef struct seq_operations __no_const seq_operations_no_const;
93277
93278 #define SEQ_SKIP 1
93279
93280@@ -111,6 +115,7 @@ void seq_pad(struct seq_file *m, char c);
93281
93282 char *mangle_path(char *s, const char *p, const char *esc);
93283 int seq_open(struct file *, const struct seq_operations *);
93284+int seq_open_restrict(struct file *, const struct seq_operations *);
93285 ssize_t seq_read(struct file *, char __user *, size_t, loff_t *);
93286 loff_t seq_lseek(struct file *, loff_t, int);
93287 int seq_release(struct inode *, struct file *);
93288@@ -128,6 +133,7 @@ int seq_path_root(struct seq_file *m, const struct path *path,
93289 const struct path *root, const char *esc);
93290
93291 int single_open(struct file *, int (*)(struct seq_file *, void *), void *);
93292+int single_open_restrict(struct file *, int (*)(struct seq_file *, void *), void *);
93293 int single_open_size(struct file *, int (*)(struct seq_file *, void *), void *, size_t);
93294 int single_release(struct inode *, struct file *);
93295 void *__seq_open_private(struct file *, const struct seq_operations *, int);
93296diff --git a/include/linux/shm.h b/include/linux/shm.h
93297index 6fb8016..ab4465e 100644
93298--- a/include/linux/shm.h
93299+++ b/include/linux/shm.h
93300@@ -22,6 +22,10 @@ struct shmid_kernel /* private to the kernel */
93301 /* The task created the shm object. NULL if the task is dead. */
93302 struct task_struct *shm_creator;
93303 struct list_head shm_clist; /* list by creator */
93304+#ifdef CONFIG_GRKERNSEC
93305+ u64 shm_createtime;
93306+ pid_t shm_lapid;
93307+#endif
93308 };
93309
93310 /* shm_mode upper byte flags */
93311diff --git a/include/linux/signal.h b/include/linux/signal.h
93312index ab1e039..ad4229e 100644
93313--- a/include/linux/signal.h
93314+++ b/include/linux/signal.h
93315@@ -289,7 +289,7 @@ static inline void allow_signal(int sig)
93316 * know it'll be handled, so that they don't get converted to
93317 * SIGKILL or just silently dropped.
93318 */
93319- kernel_sigaction(sig, (__force __sighandler_t)2);
93320+ kernel_sigaction(sig, (__force_user __sighandler_t)2);
93321 }
93322
93323 static inline void disallow_signal(int sig)
93324diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
93325index bdccc4b..e9f8670 100644
93326--- a/include/linux/skbuff.h
93327+++ b/include/linux/skbuff.h
93328@@ -771,7 +771,7 @@ struct sk_buff *__alloc_skb(unsigned int size, gfp_t priority, int flags,
93329 int node);
93330 struct sk_buff *__build_skb(void *data, unsigned int frag_size);
93331 struct sk_buff *build_skb(void *data, unsigned int frag_size);
93332-static inline struct sk_buff *alloc_skb(unsigned int size,
93333+static inline struct sk_buff * __intentional_overflow(0) alloc_skb(unsigned int size,
93334 gfp_t priority)
93335 {
93336 return __alloc_skb(size, priority, 0, NUMA_NO_NODE);
93337@@ -1967,7 +1967,7 @@ static inline u32 skb_inner_network_header_len(const struct sk_buff *skb)
93338 return skb->inner_transport_header - skb->inner_network_header;
93339 }
93340
93341-static inline int skb_network_offset(const struct sk_buff *skb)
93342+static inline int __intentional_overflow(0) skb_network_offset(const struct sk_buff *skb)
93343 {
93344 return skb_network_header(skb) - skb->data;
93345 }
93346@@ -2027,7 +2027,7 @@ static inline int pskb_network_may_pull(struct sk_buff *skb, unsigned int len)
93347 * NET_IP_ALIGN(2) + ethernet_header(14) + IP_header(20/40) + ports(8)
93348 */
93349 #ifndef NET_SKB_PAD
93350-#define NET_SKB_PAD max(32, L1_CACHE_BYTES)
93351+#define NET_SKB_PAD max(_AC(32,UL), L1_CACHE_BYTES)
93352 #endif
93353
93354 int ___pskb_trim(struct sk_buff *skb, unsigned int len);
93355@@ -2669,9 +2669,9 @@ struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned flags, int noblock,
93356 int *err);
93357 unsigned int datagram_poll(struct file *file, struct socket *sock,
93358 struct poll_table_struct *wait);
93359-int skb_copy_datagram_iter(const struct sk_buff *from, int offset,
93360+int __intentional_overflow(0) skb_copy_datagram_iter(const struct sk_buff *from, int offset,
93361 struct iov_iter *to, int size);
93362-static inline int skb_copy_datagram_msg(const struct sk_buff *from, int offset,
93363+static inline int __intentional_overflow(2,4) skb_copy_datagram_msg(const struct sk_buff *from, int offset,
93364 struct msghdr *msg, int size)
93365 {
93366 return skb_copy_datagram_iter(from, offset, &msg->msg_iter, size);
93367@@ -3193,6 +3193,9 @@ static inline void nf_reset(struct sk_buff *skb)
93368 nf_bridge_put(skb->nf_bridge);
93369 skb->nf_bridge = NULL;
93370 #endif
93371+#if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE)
93372+ skb->nf_trace = 0;
93373+#endif
93374 }
93375
93376 static inline void nf_reset_trace(struct sk_buff *skb)
93377diff --git a/include/linux/slab.h b/include/linux/slab.h
93378index 76f1fee..d95e6d2 100644
93379--- a/include/linux/slab.h
93380+++ b/include/linux/slab.h
93381@@ -14,15 +14,29 @@
93382 #include <linux/gfp.h>
93383 #include <linux/types.h>
93384 #include <linux/workqueue.h>
93385-
93386+#include <linux/err.h>
93387
93388 /*
93389 * Flags to pass to kmem_cache_create().
93390 * The ones marked DEBUG are only valid if CONFIG_SLAB_DEBUG is set.
93391 */
93392 #define SLAB_DEBUG_FREE 0x00000100UL /* DEBUG: Perform (expensive) checks on free */
93393+
93394+#ifdef CONFIG_PAX_USERCOPY_SLABS
93395+#define SLAB_USERCOPY 0x00000200UL /* PaX: Allow copying objs to/from userland */
93396+#else
93397+#define SLAB_USERCOPY 0x00000000UL
93398+#endif
93399+
93400 #define SLAB_RED_ZONE 0x00000400UL /* DEBUG: Red zone objs in a cache */
93401 #define SLAB_POISON 0x00000800UL /* DEBUG: Poison objects */
93402+
93403+#ifdef CONFIG_PAX_MEMORY_SANITIZE
93404+#define SLAB_NO_SANITIZE 0x00001000UL /* PaX: Do not sanitize objs on free */
93405+#else
93406+#define SLAB_NO_SANITIZE 0x00000000UL
93407+#endif
93408+
93409 #define SLAB_HWCACHE_ALIGN 0x00002000UL /* Align objs on cache lines */
93410 #define SLAB_CACHE_DMA 0x00004000UL /* Use GFP_DMA memory */
93411 #define SLAB_STORE_USER 0x00010000UL /* DEBUG: Store the last owner for bug hunting */
93412@@ -98,10 +112,13 @@
93413 * ZERO_SIZE_PTR can be passed to kfree though in the same way that NULL can.
93414 * Both make kfree a no-op.
93415 */
93416-#define ZERO_SIZE_PTR ((void *)16)
93417+#define ZERO_SIZE_PTR \
93418+({ \
93419+ BUILD_BUG_ON(!(MAX_ERRNO & ~PAGE_MASK));\
93420+ (void *)(-MAX_ERRNO-1L); \
93421+})
93422
93423-#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) <= \
93424- (unsigned long)ZERO_SIZE_PTR)
93425+#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) - 1 >= (unsigned long)ZERO_SIZE_PTR - 1)
93426
93427 #include <linux/kmemleak.h>
93428 #include <linux/kasan.h>
93429@@ -143,6 +160,8 @@ void * __must_check krealloc(const void *, size_t, gfp_t);
93430 void kfree(const void *);
93431 void kzfree(const void *);
93432 size_t ksize(const void *);
93433+const char *check_heap_object(const void *ptr, unsigned long n);
93434+bool is_usercopy_object(const void *ptr);
93435
93436 /*
93437 * Some archs want to perform DMA into kmalloc caches and need a guaranteed
93438@@ -235,6 +254,10 @@ extern struct kmem_cache *kmalloc_caches[KMALLOC_SHIFT_HIGH + 1];
93439 extern struct kmem_cache *kmalloc_dma_caches[KMALLOC_SHIFT_HIGH + 1];
93440 #endif
93441
93442+#ifdef CONFIG_PAX_USERCOPY_SLABS
93443+extern struct kmem_cache *kmalloc_usercopy_caches[KMALLOC_SHIFT_HIGH + 1];
93444+#endif
93445+
93446 /*
93447 * Figure out which kmalloc slab an allocation of a certain size
93448 * belongs to.
93449@@ -243,7 +266,7 @@ extern struct kmem_cache *kmalloc_dma_caches[KMALLOC_SHIFT_HIGH + 1];
93450 * 2 = 120 .. 192 bytes
93451 * n = 2^(n-1) .. 2^n -1
93452 */
93453-static __always_inline int kmalloc_index(size_t size)
93454+static __always_inline __size_overflow(1) int kmalloc_index(size_t size)
93455 {
93456 if (!size)
93457 return 0;
93458@@ -286,15 +309,15 @@ static __always_inline int kmalloc_index(size_t size)
93459 }
93460 #endif /* !CONFIG_SLOB */
93461
93462-void *__kmalloc(size_t size, gfp_t flags);
93463+void *__kmalloc(size_t size, gfp_t flags) __alloc_size(1) __size_overflow(1);
93464 void *kmem_cache_alloc(struct kmem_cache *, gfp_t flags);
93465 void kmem_cache_free(struct kmem_cache *, void *);
93466
93467 #ifdef CONFIG_NUMA
93468-void *__kmalloc_node(size_t size, gfp_t flags, int node);
93469+void *__kmalloc_node(size_t size, gfp_t flags, int node) __alloc_size(1) __size_overflow(1);
93470 void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
93471 #else
93472-static __always_inline void *__kmalloc_node(size_t size, gfp_t flags, int node)
93473+static __always_inline void * __size_overflow(1) __kmalloc_node(size_t size, gfp_t flags, int node)
93474 {
93475 return __kmalloc(size, flags);
93476 }
93477diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h
93478index 33d0490..70a6313 100644
93479--- a/include/linux/slab_def.h
93480+++ b/include/linux/slab_def.h
93481@@ -40,7 +40,7 @@ struct kmem_cache {
93482 /* 4) cache creation/removal */
93483 const char *name;
93484 struct list_head list;
93485- int refcount;
93486+ atomic_t refcount;
93487 int object_size;
93488 int align;
93489
93490@@ -56,10 +56,14 @@ struct kmem_cache {
93491 unsigned long node_allocs;
93492 unsigned long node_frees;
93493 unsigned long node_overflow;
93494- atomic_t allochit;
93495- atomic_t allocmiss;
93496- atomic_t freehit;
93497- atomic_t freemiss;
93498+ atomic_unchecked_t allochit;
93499+ atomic_unchecked_t allocmiss;
93500+ atomic_unchecked_t freehit;
93501+ atomic_unchecked_t freemiss;
93502+#ifdef CONFIG_PAX_MEMORY_SANITIZE
93503+ atomic_unchecked_t sanitized;
93504+ atomic_unchecked_t not_sanitized;
93505+#endif
93506
93507 /*
93508 * If debugging is enabled, then the allocator can add additional
93509diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
93510index 3388511..6252f90 100644
93511--- a/include/linux/slub_def.h
93512+++ b/include/linux/slub_def.h
93513@@ -74,7 +74,7 @@ struct kmem_cache {
93514 struct kmem_cache_order_objects max;
93515 struct kmem_cache_order_objects min;
93516 gfp_t allocflags; /* gfp flags to use on each alloc */
93517- int refcount; /* Refcount for slab cache destroy */
93518+ atomic_t refcount; /* Refcount for slab cache destroy */
93519 void (*ctor)(void *);
93520 int inuse; /* Offset to metadata */
93521 int align; /* Alignment */
93522diff --git a/include/linux/smp.h b/include/linux/smp.h
93523index be91db2..3f23232 100644
93524--- a/include/linux/smp.h
93525+++ b/include/linux/smp.h
93526@@ -183,7 +183,9 @@ static inline void smp_init(void) { }
93527 #endif
93528
93529 #define get_cpu() ({ preempt_disable(); smp_processor_id(); })
93530+#define raw_get_cpu() ({ raw_preempt_disable(); raw_smp_processor_id(); })
93531 #define put_cpu() preempt_enable()
93532+#define raw_put_cpu_no_resched() raw_preempt_enable_no_resched()
93533
93534 /*
93535 * Callback to arch code if there's nosmp or maxcpus=0 on the
93536diff --git a/include/linux/sock_diag.h b/include/linux/sock_diag.h
93537index 46cca4c..3323536 100644
93538--- a/include/linux/sock_diag.h
93539+++ b/include/linux/sock_diag.h
93540@@ -11,7 +11,7 @@ struct sock;
93541 struct sock_diag_handler {
93542 __u8 family;
93543 int (*dump)(struct sk_buff *skb, struct nlmsghdr *nlh);
93544-};
93545+} __do_const;
93546
93547 int sock_diag_register(const struct sock_diag_handler *h);
93548 void sock_diag_unregister(const struct sock_diag_handler *h);
93549diff --git a/include/linux/sonet.h b/include/linux/sonet.h
93550index 680f9a3..f13aeb0 100644
93551--- a/include/linux/sonet.h
93552+++ b/include/linux/sonet.h
93553@@ -7,7 +7,7 @@
93554 #include <uapi/linux/sonet.h>
93555
93556 struct k_sonet_stats {
93557-#define __HANDLE_ITEM(i) atomic_t i
93558+#define __HANDLE_ITEM(i) atomic_unchecked_t i
93559 __SONET_ITEMS
93560 #undef __HANDLE_ITEM
93561 };
93562diff --git a/include/linux/sunrpc/addr.h b/include/linux/sunrpc/addr.h
93563index 07d8e53..dc934c9 100644
93564--- a/include/linux/sunrpc/addr.h
93565+++ b/include/linux/sunrpc/addr.h
93566@@ -23,9 +23,9 @@ static inline unsigned short rpc_get_port(const struct sockaddr *sap)
93567 {
93568 switch (sap->sa_family) {
93569 case AF_INET:
93570- return ntohs(((struct sockaddr_in *)sap)->sin_port);
93571+ return ntohs(((const struct sockaddr_in *)sap)->sin_port);
93572 case AF_INET6:
93573- return ntohs(((struct sockaddr_in6 *)sap)->sin6_port);
93574+ return ntohs(((const struct sockaddr_in6 *)sap)->sin6_port);
93575 }
93576 return 0;
93577 }
93578@@ -58,7 +58,7 @@ static inline bool __rpc_cmp_addr4(const struct sockaddr *sap1,
93579 static inline bool __rpc_copy_addr4(struct sockaddr *dst,
93580 const struct sockaddr *src)
93581 {
93582- const struct sockaddr_in *ssin = (struct sockaddr_in *) src;
93583+ const struct sockaddr_in *ssin = (const struct sockaddr_in *) src;
93584 struct sockaddr_in *dsin = (struct sockaddr_in *) dst;
93585
93586 dsin->sin_family = ssin->sin_family;
93587@@ -164,7 +164,7 @@ static inline u32 rpc_get_scope_id(const struct sockaddr *sa)
93588 if (sa->sa_family != AF_INET6)
93589 return 0;
93590
93591- return ((struct sockaddr_in6 *) sa)->sin6_scope_id;
93592+ return ((const struct sockaddr_in6 *) sa)->sin6_scope_id;
93593 }
93594
93595 #endif /* _LINUX_SUNRPC_ADDR_H */
93596diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h
93597index 598ba80..d90cba6 100644
93598--- a/include/linux/sunrpc/clnt.h
93599+++ b/include/linux/sunrpc/clnt.h
93600@@ -100,7 +100,7 @@ struct rpc_procinfo {
93601 unsigned int p_timer; /* Which RTT timer to use */
93602 u32 p_statidx; /* Which procedure to account */
93603 const char * p_name; /* name of procedure */
93604-};
93605+} __do_const;
93606
93607 #ifdef __KERNEL__
93608
93609diff --git a/include/linux/sunrpc/svc.h b/include/linux/sunrpc/svc.h
93610index fae6fb9..023fbcd 100644
93611--- a/include/linux/sunrpc/svc.h
93612+++ b/include/linux/sunrpc/svc.h
93613@@ -420,7 +420,7 @@ struct svc_procedure {
93614 unsigned int pc_count; /* call count */
93615 unsigned int pc_cachetype; /* cache info (NFS) */
93616 unsigned int pc_xdrressize; /* maximum size of XDR reply */
93617-};
93618+} __do_const;
93619
93620 /*
93621 * Function prototypes.
93622diff --git a/include/linux/sunrpc/svc_rdma.h b/include/linux/sunrpc/svc_rdma.h
93623index df8edf8..d140fec 100644
93624--- a/include/linux/sunrpc/svc_rdma.h
93625+++ b/include/linux/sunrpc/svc_rdma.h
93626@@ -53,15 +53,15 @@ extern unsigned int svcrdma_ord;
93627 extern unsigned int svcrdma_max_requests;
93628 extern unsigned int svcrdma_max_req_size;
93629
93630-extern atomic_t rdma_stat_recv;
93631-extern atomic_t rdma_stat_read;
93632-extern atomic_t rdma_stat_write;
93633-extern atomic_t rdma_stat_sq_starve;
93634-extern atomic_t rdma_stat_rq_starve;
93635-extern atomic_t rdma_stat_rq_poll;
93636-extern atomic_t rdma_stat_rq_prod;
93637-extern atomic_t rdma_stat_sq_poll;
93638-extern atomic_t rdma_stat_sq_prod;
93639+extern atomic_unchecked_t rdma_stat_recv;
93640+extern atomic_unchecked_t rdma_stat_read;
93641+extern atomic_unchecked_t rdma_stat_write;
93642+extern atomic_unchecked_t rdma_stat_sq_starve;
93643+extern atomic_unchecked_t rdma_stat_rq_starve;
93644+extern atomic_unchecked_t rdma_stat_rq_poll;
93645+extern atomic_unchecked_t rdma_stat_rq_prod;
93646+extern atomic_unchecked_t rdma_stat_sq_poll;
93647+extern atomic_unchecked_t rdma_stat_sq_prod;
93648
93649 /*
93650 * Contexts are built when an RDMA request is created and are a
93651diff --git a/include/linux/sunrpc/svcauth.h b/include/linux/sunrpc/svcauth.h
93652index 8d71d65..f79586e 100644
93653--- a/include/linux/sunrpc/svcauth.h
93654+++ b/include/linux/sunrpc/svcauth.h
93655@@ -120,7 +120,7 @@ struct auth_ops {
93656 int (*release)(struct svc_rqst *rq);
93657 void (*domain_release)(struct auth_domain *);
93658 int (*set_client)(struct svc_rqst *rq);
93659-};
93660+} __do_const;
93661
93662 #define SVC_GARBAGE 1
93663 #define SVC_SYSERR 2
93664diff --git a/include/linux/swiotlb.h b/include/linux/swiotlb.h
93665index e7a018e..49f8b17 100644
93666--- a/include/linux/swiotlb.h
93667+++ b/include/linux/swiotlb.h
93668@@ -60,7 +60,8 @@ extern void
93669
93670 extern void
93671 swiotlb_free_coherent(struct device *hwdev, size_t size,
93672- void *vaddr, dma_addr_t dma_handle);
93673+ void *vaddr, dma_addr_t dma_handle,
93674+ struct dma_attrs *attrs);
93675
93676 extern dma_addr_t swiotlb_map_page(struct device *dev, struct page *page,
93677 unsigned long offset, size_t size,
93678diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h
93679index 76d1e38..200776e 100644
93680--- a/include/linux/syscalls.h
93681+++ b/include/linux/syscalls.h
93682@@ -102,7 +102,12 @@ union bpf_attr;
93683 #define __TYPE_IS_L(t) (__same_type((t)0, 0L))
93684 #define __TYPE_IS_UL(t) (__same_type((t)0, 0UL))
93685 #define __TYPE_IS_LL(t) (__same_type((t)0, 0LL) || __same_type((t)0, 0ULL))
93686-#define __SC_LONG(t, a) __typeof(__builtin_choose_expr(__TYPE_IS_LL(t), 0LL, 0L)) a
93687+#define __SC_LONG(t, a) __typeof__( \
93688+ __builtin_choose_expr( \
93689+ sizeof(t) > sizeof(int), \
93690+ (t) 0, \
93691+ __builtin_choose_expr(__type_is_unsigned(t), 0UL, 0L) \
93692+ )) a
93693 #define __SC_CAST(t, a) (t) a
93694 #define __SC_ARGS(t, a) a
93695 #define __SC_TEST(t, a) (void)BUILD_BUG_ON_ZERO(!__TYPE_IS_LL(t) && sizeof(t) > sizeof(long))
93696@@ -384,11 +389,11 @@ asmlinkage long sys_sync(void);
93697 asmlinkage long sys_fsync(unsigned int fd);
93698 asmlinkage long sys_fdatasync(unsigned int fd);
93699 asmlinkage long sys_bdflush(int func, long data);
93700-asmlinkage long sys_mount(char __user *dev_name, char __user *dir_name,
93701- char __user *type, unsigned long flags,
93702+asmlinkage long sys_mount(const char __user *dev_name, const char __user *dir_name,
93703+ const char __user *type, unsigned long flags,
93704 void __user *data);
93705-asmlinkage long sys_umount(char __user *name, int flags);
93706-asmlinkage long sys_oldumount(char __user *name);
93707+asmlinkage long sys_umount(const char __user *name, int flags);
93708+asmlinkage long sys_oldumount(const char __user *name);
93709 asmlinkage long sys_truncate(const char __user *path, long length);
93710 asmlinkage long sys_ftruncate(unsigned int fd, unsigned long length);
93711 asmlinkage long sys_stat(const char __user *filename,
93712@@ -604,7 +609,7 @@ asmlinkage long sys_getsockname(int, struct sockaddr __user *, int __user *);
93713 asmlinkage long sys_getpeername(int, struct sockaddr __user *, int __user *);
93714 asmlinkage long sys_send(int, void __user *, size_t, unsigned);
93715 asmlinkage long sys_sendto(int, void __user *, size_t, unsigned,
93716- struct sockaddr __user *, int);
93717+ struct sockaddr __user *, int) __intentional_overflow(0);
93718 asmlinkage long sys_sendmsg(int fd, struct user_msghdr __user *msg, unsigned flags);
93719 asmlinkage long sys_sendmmsg(int fd, struct mmsghdr __user *msg,
93720 unsigned int vlen, unsigned flags);
93721@@ -663,10 +668,10 @@ asmlinkage long sys_msgctl(int msqid, int cmd, struct msqid_ds __user *buf);
93722
93723 asmlinkage long sys_semget(key_t key, int nsems, int semflg);
93724 asmlinkage long sys_semop(int semid, struct sembuf __user *sops,
93725- unsigned nsops);
93726+ long nsops);
93727 asmlinkage long sys_semctl(int semid, int semnum, int cmd, unsigned long arg);
93728 asmlinkage long sys_semtimedop(int semid, struct sembuf __user *sops,
93729- unsigned nsops,
93730+ long nsops,
93731 const struct timespec __user *timeout);
93732 asmlinkage long sys_shmat(int shmid, char __user *shmaddr, int shmflg);
93733 asmlinkage long sys_shmget(key_t key, size_t size, int flag);
93734diff --git a/include/linux/syscore_ops.h b/include/linux/syscore_ops.h
93735index 27b3b0b..e093dd9 100644
93736--- a/include/linux/syscore_ops.h
93737+++ b/include/linux/syscore_ops.h
93738@@ -16,7 +16,7 @@ struct syscore_ops {
93739 int (*suspend)(void);
93740 void (*resume)(void);
93741 void (*shutdown)(void);
93742-};
93743+} __do_const;
93744
93745 extern void register_syscore_ops(struct syscore_ops *ops);
93746 extern void unregister_syscore_ops(struct syscore_ops *ops);
93747diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h
93748index b7361f8..341a15a 100644
93749--- a/include/linux/sysctl.h
93750+++ b/include/linux/sysctl.h
93751@@ -39,6 +39,8 @@ typedef int proc_handler (struct ctl_table *ctl, int write,
93752
93753 extern int proc_dostring(struct ctl_table *, int,
93754 void __user *, size_t *, loff_t *);
93755+extern int proc_dostring_modpriv(struct ctl_table *, int,
93756+ void __user *, size_t *, loff_t *);
93757 extern int proc_dointvec(struct ctl_table *, int,
93758 void __user *, size_t *, loff_t *);
93759 extern int proc_dointvec_minmax(struct ctl_table *, int,
93760@@ -113,7 +115,8 @@ struct ctl_table
93761 struct ctl_table_poll *poll;
93762 void *extra1;
93763 void *extra2;
93764-};
93765+} __do_const __randomize_layout;
93766+typedef struct ctl_table __no_const ctl_table_no_const;
93767
93768 struct ctl_node {
93769 struct rb_node node;
93770diff --git a/include/linux/sysfs.h b/include/linux/sysfs.h
93771index ddad161..a3efd26 100644
93772--- a/include/linux/sysfs.h
93773+++ b/include/linux/sysfs.h
93774@@ -34,7 +34,8 @@ struct attribute {
93775 struct lock_class_key *key;
93776 struct lock_class_key skey;
93777 #endif
93778-};
93779+} __do_const;
93780+typedef struct attribute __no_const attribute_no_const;
93781
93782 /**
93783 * sysfs_attr_init - initialize a dynamically allocated sysfs attribute
93784@@ -63,7 +64,8 @@ struct attribute_group {
93785 struct attribute *, int);
93786 struct attribute **attrs;
93787 struct bin_attribute **bin_attrs;
93788-};
93789+} __do_const;
93790+typedef struct attribute_group __no_const attribute_group_no_const;
93791
93792 /**
93793 * Use these macros to make defining attributes easier. See include/linux/device.h
93794@@ -137,7 +139,8 @@ struct bin_attribute {
93795 char *, loff_t, size_t);
93796 int (*mmap)(struct file *, struct kobject *, struct bin_attribute *attr,
93797 struct vm_area_struct *vma);
93798-};
93799+} __do_const;
93800+typedef struct bin_attribute __no_const bin_attribute_no_const;
93801
93802 /**
93803 * sysfs_bin_attr_init - initialize a dynamically allocated bin_attribute
93804diff --git a/include/linux/sysrq.h b/include/linux/sysrq.h
93805index 387fa7d..3fcde6b 100644
93806--- a/include/linux/sysrq.h
93807+++ b/include/linux/sysrq.h
93808@@ -16,6 +16,7 @@
93809
93810 #include <linux/errno.h>
93811 #include <linux/types.h>
93812+#include <linux/compiler.h>
93813
93814 /* Possible values of bitmask for enabling sysrq functions */
93815 /* 0x0001 is reserved for enable everything */
93816@@ -33,7 +34,7 @@ struct sysrq_key_op {
93817 char *help_msg;
93818 char *action_msg;
93819 int enable_mask;
93820-};
93821+} __do_const;
93822
93823 #ifdef CONFIG_MAGIC_SYSRQ
93824
93825diff --git a/include/linux/thread_info.h b/include/linux/thread_info.h
93826index ff307b5..f1a4468 100644
93827--- a/include/linux/thread_info.h
93828+++ b/include/linux/thread_info.h
93829@@ -145,6 +145,13 @@ static inline bool test_and_clear_restore_sigmask(void)
93830 #error "no set_restore_sigmask() provided and default one won't work"
93831 #endif
93832
93833+extern void __check_object_size(const void *ptr, unsigned long n, bool to_user, bool const_size);
93834+
93835+static inline void check_object_size(const void *ptr, unsigned long n, bool to_user)
93836+{
93837+ __check_object_size(ptr, n, to_user, __builtin_constant_p(n));
93838+}
93839+
93840 #endif /* __KERNEL__ */
93841
93842 #endif /* _LINUX_THREAD_INFO_H */
93843diff --git a/include/linux/tty.h b/include/linux/tty.h
93844index 358a337..8829c1f 100644
93845--- a/include/linux/tty.h
93846+++ b/include/linux/tty.h
93847@@ -225,7 +225,7 @@ struct tty_port {
93848 const struct tty_port_operations *ops; /* Port operations */
93849 spinlock_t lock; /* Lock protecting tty field */
93850 int blocked_open; /* Waiting to open */
93851- int count; /* Usage count */
93852+ atomic_t count; /* Usage count */
93853 wait_queue_head_t open_wait; /* Open waiters */
93854 wait_queue_head_t close_wait; /* Close waiters */
93855 wait_queue_head_t delta_msr_wait; /* Modem status change */
93856@@ -313,7 +313,7 @@ struct tty_struct {
93857 /* If the tty has a pending do_SAK, queue it here - akpm */
93858 struct work_struct SAK_work;
93859 struct tty_port *port;
93860-};
93861+} __randomize_layout;
93862
93863 /* Each of a tty's open files has private_data pointing to tty_file_private */
93864 struct tty_file_private {
93865@@ -572,7 +572,7 @@ extern int tty_port_open(struct tty_port *port,
93866 struct tty_struct *tty, struct file *filp);
93867 static inline int tty_port_users(struct tty_port *port)
93868 {
93869- return port->count + port->blocked_open;
93870+ return atomic_read(&port->count) + port->blocked_open;
93871 }
93872
93873 extern int tty_register_ldisc(int disc, struct tty_ldisc_ops *new_ldisc);
93874diff --git a/include/linux/tty_driver.h b/include/linux/tty_driver.h
93875index 92e337c..f46757b 100644
93876--- a/include/linux/tty_driver.h
93877+++ b/include/linux/tty_driver.h
93878@@ -291,7 +291,7 @@ struct tty_operations {
93879 void (*poll_put_char)(struct tty_driver *driver, int line, char ch);
93880 #endif
93881 const struct file_operations *proc_fops;
93882-};
93883+} __do_const __randomize_layout;
93884
93885 struct tty_driver {
93886 int magic; /* magic number for this structure */
93887@@ -325,7 +325,7 @@ struct tty_driver {
93888
93889 const struct tty_operations *ops;
93890 struct list_head tty_drivers;
93891-};
93892+} __randomize_layout;
93893
93894 extern struct list_head tty_drivers;
93895
93896diff --git a/include/linux/tty_ldisc.h b/include/linux/tty_ldisc.h
93897index 00c9d68..bc0188b 100644
93898--- a/include/linux/tty_ldisc.h
93899+++ b/include/linux/tty_ldisc.h
93900@@ -215,7 +215,7 @@ struct tty_ldisc_ops {
93901
93902 struct module *owner;
93903
93904- int refcount;
93905+ atomic_t refcount;
93906 };
93907
93908 struct tty_ldisc {
93909diff --git a/include/linux/types.h b/include/linux/types.h
93910index 6747247..fc7ec8b 100644
93911--- a/include/linux/types.h
93912+++ b/include/linux/types.h
93913@@ -174,10 +174,26 @@ typedef struct {
93914 int counter;
93915 } atomic_t;
93916
93917+#ifdef CONFIG_PAX_REFCOUNT
93918+typedef struct {
93919+ int counter;
93920+} atomic_unchecked_t;
93921+#else
93922+typedef atomic_t atomic_unchecked_t;
93923+#endif
93924+
93925 #ifdef CONFIG_64BIT
93926 typedef struct {
93927 long counter;
93928 } atomic64_t;
93929+
93930+#ifdef CONFIG_PAX_REFCOUNT
93931+typedef struct {
93932+ long counter;
93933+} atomic64_unchecked_t;
93934+#else
93935+typedef atomic64_t atomic64_unchecked_t;
93936+#endif
93937 #endif
93938
93939 struct list_head {
93940diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h
93941index ecd3319..8a36ded 100644
93942--- a/include/linux/uaccess.h
93943+++ b/include/linux/uaccess.h
93944@@ -75,11 +75,11 @@ static inline unsigned long __copy_from_user_nocache(void *to,
93945 long ret; \
93946 mm_segment_t old_fs = get_fs(); \
93947 \
93948- set_fs(KERNEL_DS); \
93949 pagefault_disable(); \
93950- ret = __copy_from_user_inatomic(&(retval), (__force typeof(retval) __user *)(addr), sizeof(retval)); \
93951- pagefault_enable(); \
93952+ set_fs(KERNEL_DS); \
93953+ ret = __copy_from_user_inatomic(&(retval), (typeof(retval) __force_user *)(addr), sizeof(retval)); \
93954 set_fs(old_fs); \
93955+ pagefault_enable(); \
93956 ret; \
93957 })
93958
93959diff --git a/include/linux/uidgid.h b/include/linux/uidgid.h
93960index 2d1f9b6..d7a9fce 100644
93961--- a/include/linux/uidgid.h
93962+++ b/include/linux/uidgid.h
93963@@ -175,4 +175,9 @@ static inline bool kgid_has_mapping(struct user_namespace *ns, kgid_t gid)
93964
93965 #endif /* CONFIG_USER_NS */
93966
93967+#define GR_GLOBAL_UID(x) from_kuid_munged(&init_user_ns, (x))
93968+#define GR_GLOBAL_GID(x) from_kgid_munged(&init_user_ns, (x))
93969+#define gr_is_global_root(x) uid_eq((x), GLOBAL_ROOT_UID)
93970+#define gr_is_global_nonroot(x) (!uid_eq((x), GLOBAL_ROOT_UID))
93971+
93972 #endif /* _LINUX_UIDGID_H */
93973diff --git a/include/linux/uio_driver.h b/include/linux/uio_driver.h
93974index 32c0e83..671eb35 100644
93975--- a/include/linux/uio_driver.h
93976+++ b/include/linux/uio_driver.h
93977@@ -67,7 +67,7 @@ struct uio_device {
93978 struct module *owner;
93979 struct device *dev;
93980 int minor;
93981- atomic_t event;
93982+ atomic_unchecked_t event;
93983 struct fasync_struct *async_queue;
93984 wait_queue_head_t wait;
93985 struct uio_info *info;
93986diff --git a/include/linux/unaligned/access_ok.h b/include/linux/unaligned/access_ok.h
93987index 99c1b4d..562e6f3 100644
93988--- a/include/linux/unaligned/access_ok.h
93989+++ b/include/linux/unaligned/access_ok.h
93990@@ -4,34 +4,34 @@
93991 #include <linux/kernel.h>
93992 #include <asm/byteorder.h>
93993
93994-static inline u16 get_unaligned_le16(const void *p)
93995+static inline u16 __intentional_overflow(-1) get_unaligned_le16(const void *p)
93996 {
93997- return le16_to_cpup((__le16 *)p);
93998+ return le16_to_cpup((const __le16 *)p);
93999 }
94000
94001-static inline u32 get_unaligned_le32(const void *p)
94002+static inline u32 __intentional_overflow(-1) get_unaligned_le32(const void *p)
94003 {
94004- return le32_to_cpup((__le32 *)p);
94005+ return le32_to_cpup((const __le32 *)p);
94006 }
94007
94008-static inline u64 get_unaligned_le64(const void *p)
94009+static inline u64 __intentional_overflow(-1) get_unaligned_le64(const void *p)
94010 {
94011- return le64_to_cpup((__le64 *)p);
94012+ return le64_to_cpup((const __le64 *)p);
94013 }
94014
94015-static inline u16 get_unaligned_be16(const void *p)
94016+static inline u16 __intentional_overflow(-1) get_unaligned_be16(const void *p)
94017 {
94018- return be16_to_cpup((__be16 *)p);
94019+ return be16_to_cpup((const __be16 *)p);
94020 }
94021
94022-static inline u32 get_unaligned_be32(const void *p)
94023+static inline u32 __intentional_overflow(-1) get_unaligned_be32(const void *p)
94024 {
94025- return be32_to_cpup((__be32 *)p);
94026+ return be32_to_cpup((const __be32 *)p);
94027 }
94028
94029-static inline u64 get_unaligned_be64(const void *p)
94030+static inline u64 __intentional_overflow(-1) get_unaligned_be64(const void *p)
94031 {
94032- return be64_to_cpup((__be64 *)p);
94033+ return be64_to_cpup((const __be64 *)p);
94034 }
94035
94036 static inline void put_unaligned_le16(u16 val, void *p)
94037diff --git a/include/linux/usb.h b/include/linux/usb.h
94038index 447fe29..9fc875f 100644
94039--- a/include/linux/usb.h
94040+++ b/include/linux/usb.h
94041@@ -592,7 +592,7 @@ struct usb_device {
94042 int maxchild;
94043
94044 u32 quirks;
94045- atomic_t urbnum;
94046+ atomic_unchecked_t urbnum;
94047
94048 unsigned long active_duration;
94049
94050@@ -1676,7 +1676,7 @@ void usb_buffer_unmap_sg(const struct usb_device *dev, int is_in,
94051
94052 extern int usb_control_msg(struct usb_device *dev, unsigned int pipe,
94053 __u8 request, __u8 requesttype, __u16 value, __u16 index,
94054- void *data, __u16 size, int timeout);
94055+ void *data, __u16 size, int timeout) __intentional_overflow(-1);
94056 extern int usb_interrupt_msg(struct usb_device *usb_dev, unsigned int pipe,
94057 void *data, int len, int *actual_length, int timeout);
94058 extern int usb_bulk_msg(struct usb_device *usb_dev, unsigned int pipe,
94059diff --git a/include/linux/usb/renesas_usbhs.h b/include/linux/usb/renesas_usbhs.h
94060index 9fd9e48..e2c5f35 100644
94061--- a/include/linux/usb/renesas_usbhs.h
94062+++ b/include/linux/usb/renesas_usbhs.h
94063@@ -39,7 +39,7 @@ enum {
94064 */
94065 struct renesas_usbhs_driver_callback {
94066 int (*notify_hotplug)(struct platform_device *pdev);
94067-};
94068+} __no_const;
94069
94070 /*
94071 * callback functions for platform
94072diff --git a/include/linux/user_namespace.h b/include/linux/user_namespace.h
94073index 8297e5b..0dfae27 100644
94074--- a/include/linux/user_namespace.h
94075+++ b/include/linux/user_namespace.h
94076@@ -39,7 +39,7 @@ struct user_namespace {
94077 struct key *persistent_keyring_register;
94078 struct rw_semaphore persistent_keyring_register_sem;
94079 #endif
94080-};
94081+} __randomize_layout;
94082
94083 extern struct user_namespace init_user_ns;
94084
94085diff --git a/include/linux/utsname.h b/include/linux/utsname.h
94086index 5093f58..c103e58 100644
94087--- a/include/linux/utsname.h
94088+++ b/include/linux/utsname.h
94089@@ -25,7 +25,7 @@ struct uts_namespace {
94090 struct new_utsname name;
94091 struct user_namespace *user_ns;
94092 struct ns_common ns;
94093-};
94094+} __randomize_layout;
94095 extern struct uts_namespace init_uts_ns;
94096
94097 #ifdef CONFIG_UTS_NS
94098diff --git a/include/linux/vermagic.h b/include/linux/vermagic.h
94099index 6f8fbcf..4efc177 100644
94100--- a/include/linux/vermagic.h
94101+++ b/include/linux/vermagic.h
94102@@ -25,9 +25,42 @@
94103 #define MODULE_ARCH_VERMAGIC ""
94104 #endif
94105
94106+#ifdef CONFIG_PAX_REFCOUNT
94107+#define MODULE_PAX_REFCOUNT "REFCOUNT "
94108+#else
94109+#define MODULE_PAX_REFCOUNT ""
94110+#endif
94111+
94112+#ifdef CONSTIFY_PLUGIN
94113+#define MODULE_CONSTIFY_PLUGIN "CONSTIFY_PLUGIN "
94114+#else
94115+#define MODULE_CONSTIFY_PLUGIN ""
94116+#endif
94117+
94118+#ifdef STACKLEAK_PLUGIN
94119+#define MODULE_STACKLEAK_PLUGIN "STACKLEAK_PLUGIN "
94120+#else
94121+#define MODULE_STACKLEAK_PLUGIN ""
94122+#endif
94123+
94124+#ifdef RANDSTRUCT_PLUGIN
94125+#include <generated/randomize_layout_hash.h>
94126+#define MODULE_RANDSTRUCT_PLUGIN "RANDSTRUCT_PLUGIN_" RANDSTRUCT_HASHED_SEED
94127+#else
94128+#define MODULE_RANDSTRUCT_PLUGIN
94129+#endif
94130+
94131+#ifdef CONFIG_GRKERNSEC
94132+#define MODULE_GRSEC "GRSEC "
94133+#else
94134+#define MODULE_GRSEC ""
94135+#endif
94136+
94137 #define VERMAGIC_STRING \
94138 UTS_RELEASE " " \
94139 MODULE_VERMAGIC_SMP MODULE_VERMAGIC_PREEMPT \
94140 MODULE_VERMAGIC_MODULE_UNLOAD MODULE_VERMAGIC_MODVERSIONS \
94141- MODULE_ARCH_VERMAGIC
94142+ MODULE_ARCH_VERMAGIC \
94143+ MODULE_PAX_REFCOUNT MODULE_CONSTIFY_PLUGIN MODULE_STACKLEAK_PLUGIN \
94144+ MODULE_GRSEC MODULE_RANDSTRUCT_PLUGIN
94145
94146diff --git a/include/linux/vga_switcheroo.h b/include/linux/vga_switcheroo.h
94147index b483abd..af305ad 100644
94148--- a/include/linux/vga_switcheroo.h
94149+++ b/include/linux/vga_switcheroo.h
94150@@ -63,9 +63,9 @@ int vga_switcheroo_get_client_state(struct pci_dev *dev);
94151
94152 void vga_switcheroo_set_dynamic_switch(struct pci_dev *pdev, enum vga_switcheroo_state dynamic);
94153
94154-int vga_switcheroo_init_domain_pm_ops(struct device *dev, struct dev_pm_domain *domain);
94155+int vga_switcheroo_init_domain_pm_ops(struct device *dev, dev_pm_domain_no_const *domain);
94156 void vga_switcheroo_fini_domain_pm_ops(struct device *dev);
94157-int vga_switcheroo_init_domain_pm_optimus_hdmi_audio(struct device *dev, struct dev_pm_domain *domain);
94158+int vga_switcheroo_init_domain_pm_optimus_hdmi_audio(struct device *dev, dev_pm_domain_no_const *domain);
94159 #else
94160
94161 static inline void vga_switcheroo_unregister_client(struct pci_dev *dev) {}
94162@@ -82,9 +82,9 @@ static inline int vga_switcheroo_get_client_state(struct pci_dev *dev) { return
94163
94164 static inline void vga_switcheroo_set_dynamic_switch(struct pci_dev *pdev, enum vga_switcheroo_state dynamic) {}
94165
94166-static inline int vga_switcheroo_init_domain_pm_ops(struct device *dev, struct dev_pm_domain *domain) { return -EINVAL; }
94167+static inline int vga_switcheroo_init_domain_pm_ops(struct device *dev, dev_pm_domain_no_const *domain) { return -EINVAL; }
94168 static inline void vga_switcheroo_fini_domain_pm_ops(struct device *dev) {}
94169-static inline int vga_switcheroo_init_domain_pm_optimus_hdmi_audio(struct device *dev, struct dev_pm_domain *domain) { return -EINVAL; }
94170+static inline int vga_switcheroo_init_domain_pm_optimus_hdmi_audio(struct device *dev, dev_pm_domain_no_const *domain) { return -EINVAL; }
94171
94172 #endif
94173 #endif /* _LINUX_VGA_SWITCHEROO_H_ */
94174diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h
94175index 0ec5983..cc61051 100644
94176--- a/include/linux/vmalloc.h
94177+++ b/include/linux/vmalloc.h
94178@@ -18,6 +18,11 @@ struct vm_area_struct; /* vma defining user mapping in mm_types.h */
94179 #define VM_UNINITIALIZED 0x00000020 /* vm_struct is not fully initialized */
94180 #define VM_NO_GUARD 0x00000040 /* don't add guard page */
94181 #define VM_KASAN 0x00000080 /* has allocated kasan shadow memory */
94182+
94183+#if defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
94184+#define VM_KERNEXEC 0x00000100 /* allocate from executable kernel memory range */
94185+#endif
94186+
94187 /* bits [20..32] reserved for arch specific ioremap internals */
94188
94189 /*
94190@@ -86,6 +91,10 @@ extern void *vmap(struct page **pages, unsigned int count,
94191 unsigned long flags, pgprot_t prot);
94192 extern void vunmap(const void *addr);
94193
94194+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
94195+extern void unmap_process_stacks(struct task_struct *task);
94196+#endif
94197+
94198 extern int remap_vmalloc_range_partial(struct vm_area_struct *vma,
94199 unsigned long uaddr, void *kaddr,
94200 unsigned long size);
94201@@ -150,7 +159,7 @@ extern void free_vm_area(struct vm_struct *area);
94202
94203 /* for /dev/kmem */
94204 extern long vread(char *buf, char *addr, unsigned long count);
94205-extern long vwrite(char *buf, char *addr, unsigned long count);
94206+extern long vwrite(char *buf, char *addr, unsigned long count) __size_overflow(3);
94207
94208 /*
94209 * Internals. Dont't use..
94210diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h
94211index 82e7db7..f8ce3d0 100644
94212--- a/include/linux/vmstat.h
94213+++ b/include/linux/vmstat.h
94214@@ -108,18 +108,18 @@ static inline void vm_events_fold_cpu(int cpu)
94215 /*
94216 * Zone based page accounting with per cpu differentials.
94217 */
94218-extern atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
94219+extern atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
94220
94221 static inline void zone_page_state_add(long x, struct zone *zone,
94222 enum zone_stat_item item)
94223 {
94224- atomic_long_add(x, &zone->vm_stat[item]);
94225- atomic_long_add(x, &vm_stat[item]);
94226+ atomic_long_add_unchecked(x, &zone->vm_stat[item]);
94227+ atomic_long_add_unchecked(x, &vm_stat[item]);
94228 }
94229
94230-static inline unsigned long global_page_state(enum zone_stat_item item)
94231+static inline unsigned long __intentional_overflow(-1) global_page_state(enum zone_stat_item item)
94232 {
94233- long x = atomic_long_read(&vm_stat[item]);
94234+ long x = atomic_long_read_unchecked(&vm_stat[item]);
94235 #ifdef CONFIG_SMP
94236 if (x < 0)
94237 x = 0;
94238@@ -127,10 +127,10 @@ static inline unsigned long global_page_state(enum zone_stat_item item)
94239 return x;
94240 }
94241
94242-static inline unsigned long zone_page_state(struct zone *zone,
94243+static inline unsigned long __intentional_overflow(-1) zone_page_state(struct zone *zone,
94244 enum zone_stat_item item)
94245 {
94246- long x = atomic_long_read(&zone->vm_stat[item]);
94247+ long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
94248 #ifdef CONFIG_SMP
94249 if (x < 0)
94250 x = 0;
94251@@ -147,7 +147,7 @@ static inline unsigned long zone_page_state(struct zone *zone,
94252 static inline unsigned long zone_page_state_snapshot(struct zone *zone,
94253 enum zone_stat_item item)
94254 {
94255- long x = atomic_long_read(&zone->vm_stat[item]);
94256+ long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
94257
94258 #ifdef CONFIG_SMP
94259 int cpu;
94260@@ -234,14 +234,14 @@ static inline void __mod_zone_page_state(struct zone *zone,
94261
94262 static inline void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
94263 {
94264- atomic_long_inc(&zone->vm_stat[item]);
94265- atomic_long_inc(&vm_stat[item]);
94266+ atomic_long_inc_unchecked(&zone->vm_stat[item]);
94267+ atomic_long_inc_unchecked(&vm_stat[item]);
94268 }
94269
94270 static inline void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
94271 {
94272- atomic_long_dec(&zone->vm_stat[item]);
94273- atomic_long_dec(&vm_stat[item]);
94274+ atomic_long_dec_unchecked(&zone->vm_stat[item]);
94275+ atomic_long_dec_unchecked(&vm_stat[item]);
94276 }
94277
94278 static inline void __inc_zone_page_state(struct page *page,
94279diff --git a/include/linux/xattr.h b/include/linux/xattr.h
94280index 91b0a68..0e9adf6 100644
94281--- a/include/linux/xattr.h
94282+++ b/include/linux/xattr.h
94283@@ -28,7 +28,7 @@ struct xattr_handler {
94284 size_t size, int handler_flags);
94285 int (*set)(struct dentry *dentry, const char *name, const void *buffer,
94286 size_t size, int flags, int handler_flags);
94287-};
94288+} __do_const;
94289
94290 struct xattr {
94291 const char *name;
94292@@ -37,6 +37,9 @@ struct xattr {
94293 };
94294
94295 ssize_t xattr_getsecurity(struct inode *, const char *, void *, size_t);
94296+#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
94297+ssize_t pax_getxattr(struct dentry *, void *, size_t);
94298+#endif
94299 ssize_t vfs_getxattr(struct dentry *, const char *, void *, size_t);
94300 ssize_t vfs_listxattr(struct dentry *d, char *list, size_t size);
94301 int __vfs_setxattr_noperm(struct dentry *, const char *, const void *, size_t, int);
94302diff --git a/include/linux/zlib.h b/include/linux/zlib.h
94303index 92dbbd3..13ab0b3 100644
94304--- a/include/linux/zlib.h
94305+++ b/include/linux/zlib.h
94306@@ -31,6 +31,7 @@
94307 #define _ZLIB_H
94308
94309 #include <linux/zconf.h>
94310+#include <linux/compiler.h>
94311
94312 /* zlib deflate based on ZLIB_VERSION "1.1.3" */
94313 /* zlib inflate based on ZLIB_VERSION "1.2.3" */
94314@@ -179,7 +180,7 @@ typedef z_stream *z_streamp;
94315
94316 /* basic functions */
94317
94318-extern int zlib_deflate_workspacesize (int windowBits, int memLevel);
94319+extern int zlib_deflate_workspacesize (int windowBits, int memLevel) __intentional_overflow(0);
94320 /*
94321 Returns the number of bytes that needs to be allocated for a per-
94322 stream workspace with the specified parameters. A pointer to this
94323diff --git a/include/media/v4l2-dev.h b/include/media/v4l2-dev.h
94324index 3e4fddf..5ec9104 100644
94325--- a/include/media/v4l2-dev.h
94326+++ b/include/media/v4l2-dev.h
94327@@ -75,7 +75,7 @@ struct v4l2_file_operations {
94328 int (*mmap) (struct file *, struct vm_area_struct *);
94329 int (*open) (struct file *);
94330 int (*release) (struct file *);
94331-};
94332+} __do_const;
94333
94334 /*
94335 * Newer version of video_device, handled by videodev2.c
94336diff --git a/include/media/v4l2-device.h b/include/media/v4l2-device.h
94337index ffb69da..040393e 100644
94338--- a/include/media/v4l2-device.h
94339+++ b/include/media/v4l2-device.h
94340@@ -95,7 +95,7 @@ int __must_check v4l2_device_register(struct device *dev, struct v4l2_device *v4
94341 this function returns 0. If the name ends with a digit (e.g. cx18),
94342 then the name will be set to cx18-0 since cx180 looks really odd. */
94343 int v4l2_device_set_name(struct v4l2_device *v4l2_dev, const char *basename,
94344- atomic_t *instance);
94345+ atomic_unchecked_t *instance);
94346
94347 /* Set v4l2_dev->dev to NULL. Call when the USB parent disconnects.
94348 Since the parent disappears this ensures that v4l2_dev doesn't have an
94349diff --git a/include/net/9p/transport.h b/include/net/9p/transport.h
94350index 2a25dec..bf6dd8a 100644
94351--- a/include/net/9p/transport.h
94352+++ b/include/net/9p/transport.h
94353@@ -62,7 +62,7 @@ struct p9_trans_module {
94354 int (*cancelled)(struct p9_client *, struct p9_req_t *req);
94355 int (*zc_request)(struct p9_client *, struct p9_req_t *,
94356 char *, char *, int , int, int, int);
94357-};
94358+} __do_const;
94359
94360 void v9fs_register_trans(struct p9_trans_module *m);
94361 void v9fs_unregister_trans(struct p9_trans_module *m);
94362diff --git a/include/net/af_unix.h b/include/net/af_unix.h
94363index a175ba4..196eb8242 100644
94364--- a/include/net/af_unix.h
94365+++ b/include/net/af_unix.h
94366@@ -36,7 +36,7 @@ struct unix_skb_parms {
94367 u32 secid; /* Security ID */
94368 #endif
94369 u32 consumed;
94370-};
94371+} __randomize_layout;
94372
94373 #define UNIXCB(skb) (*(struct unix_skb_parms *)&((skb)->cb))
94374 #define UNIXSID(skb) (&UNIXCB((skb)).secid)
94375diff --git a/include/net/bluetooth/l2cap.h b/include/net/bluetooth/l2cap.h
94376index 2239a37..a83461f 100644
94377--- a/include/net/bluetooth/l2cap.h
94378+++ b/include/net/bluetooth/l2cap.h
94379@@ -609,7 +609,7 @@ struct l2cap_ops {
94380 struct sk_buff *(*alloc_skb) (struct l2cap_chan *chan,
94381 unsigned long hdr_len,
94382 unsigned long len, int nb);
94383-};
94384+} __do_const;
94385
94386 struct l2cap_conn {
94387 struct hci_conn *hcon;
94388diff --git a/include/net/bonding.h b/include/net/bonding.h
94389index fda6fee..dbdf83c 100644
94390--- a/include/net/bonding.h
94391+++ b/include/net/bonding.h
94392@@ -665,7 +665,7 @@ extern struct rtnl_link_ops bond_link_ops;
94393
94394 static inline void bond_tx_drop(struct net_device *dev, struct sk_buff *skb)
94395 {
94396- atomic_long_inc(&dev->tx_dropped);
94397+ atomic_long_inc_unchecked(&dev->tx_dropped);
94398 dev_kfree_skb_any(skb);
94399 }
94400
94401diff --git a/include/net/caif/cfctrl.h b/include/net/caif/cfctrl.h
94402index f2ae33d..c457cf0 100644
94403--- a/include/net/caif/cfctrl.h
94404+++ b/include/net/caif/cfctrl.h
94405@@ -52,7 +52,7 @@ struct cfctrl_rsp {
94406 void (*radioset_rsp)(void);
94407 void (*reject_rsp)(struct cflayer *layer, u8 linkid,
94408 struct cflayer *client_layer);
94409-};
94410+} __no_const;
94411
94412 /* Link Setup Parameters for CAIF-Links. */
94413 struct cfctrl_link_param {
94414@@ -101,8 +101,8 @@ struct cfctrl_request_info {
94415 struct cfctrl {
94416 struct cfsrvl serv;
94417 struct cfctrl_rsp res;
94418- atomic_t req_seq_no;
94419- atomic_t rsp_seq_no;
94420+ atomic_unchecked_t req_seq_no;
94421+ atomic_unchecked_t rsp_seq_no;
94422 struct list_head list;
94423 /* Protects from simultaneous access to first_req list */
94424 spinlock_t info_list_lock;
94425diff --git a/include/net/flow.h b/include/net/flow.h
94426index 8109a15..504466d 100644
94427--- a/include/net/flow.h
94428+++ b/include/net/flow.h
94429@@ -231,6 +231,6 @@ void flow_cache_fini(struct net *net);
94430
94431 void flow_cache_flush(struct net *net);
94432 void flow_cache_flush_deferred(struct net *net);
94433-extern atomic_t flow_cache_genid;
94434+extern atomic_unchecked_t flow_cache_genid;
94435
94436 #endif
94437diff --git a/include/net/genetlink.h b/include/net/genetlink.h
94438index 0574abd..0f16881 100644
94439--- a/include/net/genetlink.h
94440+++ b/include/net/genetlink.h
94441@@ -130,7 +130,7 @@ struct genl_ops {
94442 u8 cmd;
94443 u8 internal_flags;
94444 u8 flags;
94445-};
94446+} __do_const;
94447
94448 int __genl_register_family(struct genl_family *family);
94449
94450diff --git a/include/net/gro_cells.h b/include/net/gro_cells.h
94451index 0f712c0..cd762c4 100644
94452--- a/include/net/gro_cells.h
94453+++ b/include/net/gro_cells.h
94454@@ -27,7 +27,7 @@ static inline void gro_cells_receive(struct gro_cells *gcells, struct sk_buff *s
94455 cell = this_cpu_ptr(gcells->cells);
94456
94457 if (skb_queue_len(&cell->napi_skbs) > netdev_max_backlog) {
94458- atomic_long_inc(&dev->rx_dropped);
94459+ atomic_long_inc_unchecked(&dev->rx_dropped);
94460 kfree_skb(skb);
94461 return;
94462 }
94463diff --git a/include/net/inet_connection_sock.h b/include/net/inet_connection_sock.h
94464index 5976bde..3a81660 100644
94465--- a/include/net/inet_connection_sock.h
94466+++ b/include/net/inet_connection_sock.h
94467@@ -63,7 +63,7 @@ struct inet_connection_sock_af_ops {
94468 int (*bind_conflict)(const struct sock *sk,
94469 const struct inet_bind_bucket *tb, bool relax);
94470 void (*mtu_reduced)(struct sock *sk);
94471-};
94472+} __do_const;
94473
94474 /** inet_connection_sock - INET connection oriented sock
94475 *
94476diff --git a/include/net/inetpeer.h b/include/net/inetpeer.h
94477index 80479ab..0c3f647 100644
94478--- a/include/net/inetpeer.h
94479+++ b/include/net/inetpeer.h
94480@@ -47,7 +47,7 @@ struct inet_peer {
94481 */
94482 union {
94483 struct {
94484- atomic_t rid; /* Frag reception counter */
94485+ atomic_unchecked_t rid; /* Frag reception counter */
94486 };
94487 struct rcu_head rcu;
94488 struct inet_peer *gc_next;
94489diff --git a/include/net/ip.h b/include/net/ip.h
94490index 6cc1eaf..14059b0 100644
94491--- a/include/net/ip.h
94492+++ b/include/net/ip.h
94493@@ -317,7 +317,7 @@ static inline unsigned int ip_skb_dst_mtu(const struct sk_buff *skb)
94494 }
94495 }
94496
94497-u32 ip_idents_reserve(u32 hash, int segs);
94498+u32 ip_idents_reserve(u32 hash, int segs) __intentional_overflow(-1);
94499 void __ip_select_ident(struct iphdr *iph, int segs);
94500
94501 static inline void ip_select_ident_segs(struct sk_buff *skb, struct sock *sk, int segs)
94502diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h
94503index 5bd120e4..03fb812 100644
94504--- a/include/net/ip_fib.h
94505+++ b/include/net/ip_fib.h
94506@@ -170,7 +170,7 @@ __be32 fib_info_update_nh_saddr(struct net *net, struct fib_nh *nh);
94507
94508 #define FIB_RES_SADDR(net, res) \
94509 ((FIB_RES_NH(res).nh_saddr_genid == \
94510- atomic_read(&(net)->ipv4.dev_addr_genid)) ? \
94511+ atomic_read_unchecked(&(net)->ipv4.dev_addr_genid)) ? \
94512 FIB_RES_NH(res).nh_saddr : \
94513 fib_info_update_nh_saddr((net), &FIB_RES_NH(res)))
94514 #define FIB_RES_GW(res) (FIB_RES_NH(res).nh_gw)
94515diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h
94516index 615b20b..fd4cbd8 100644
94517--- a/include/net/ip_vs.h
94518+++ b/include/net/ip_vs.h
94519@@ -534,7 +534,7 @@ struct ip_vs_conn {
94520 struct ip_vs_conn *control; /* Master control connection */
94521 atomic_t n_control; /* Number of controlled ones */
94522 struct ip_vs_dest *dest; /* real server */
94523- atomic_t in_pkts; /* incoming packet counter */
94524+ atomic_unchecked_t in_pkts; /* incoming packet counter */
94525
94526 /* Packet transmitter for different forwarding methods. If it
94527 * mangles the packet, it must return NF_DROP or better NF_STOLEN,
94528@@ -682,7 +682,7 @@ struct ip_vs_dest {
94529 __be16 port; /* port number of the server */
94530 union nf_inet_addr addr; /* IP address of the server */
94531 volatile unsigned int flags; /* dest status flags */
94532- atomic_t conn_flags; /* flags to copy to conn */
94533+ atomic_unchecked_t conn_flags; /* flags to copy to conn */
94534 atomic_t weight; /* server weight */
94535
94536 atomic_t refcnt; /* reference counter */
94537@@ -928,11 +928,11 @@ struct netns_ipvs {
94538 /* ip_vs_lblc */
94539 int sysctl_lblc_expiration;
94540 struct ctl_table_header *lblc_ctl_header;
94541- struct ctl_table *lblc_ctl_table;
94542+ ctl_table_no_const *lblc_ctl_table;
94543 /* ip_vs_lblcr */
94544 int sysctl_lblcr_expiration;
94545 struct ctl_table_header *lblcr_ctl_header;
94546- struct ctl_table *lblcr_ctl_table;
94547+ ctl_table_no_const *lblcr_ctl_table;
94548 /* ip_vs_est */
94549 struct list_head est_list; /* estimator list */
94550 spinlock_t est_lock;
94551diff --git a/include/net/irda/ircomm_tty.h b/include/net/irda/ircomm_tty.h
94552index 8d4f588..2e37ad2 100644
94553--- a/include/net/irda/ircomm_tty.h
94554+++ b/include/net/irda/ircomm_tty.h
94555@@ -33,6 +33,7 @@
94556 #include <linux/termios.h>
94557 #include <linux/timer.h>
94558 #include <linux/tty.h> /* struct tty_struct */
94559+#include <asm/local.h>
94560
94561 #include <net/irda/irias_object.h>
94562 #include <net/irda/ircomm_core.h>
94563diff --git a/include/net/iucv/af_iucv.h b/include/net/iucv/af_iucv.h
94564index 714cc9a..ea05f3e 100644
94565--- a/include/net/iucv/af_iucv.h
94566+++ b/include/net/iucv/af_iucv.h
94567@@ -149,7 +149,7 @@ struct iucv_skb_cb {
94568 struct iucv_sock_list {
94569 struct hlist_head head;
94570 rwlock_t lock;
94571- atomic_t autobind_name;
94572+ atomic_unchecked_t autobind_name;
94573 };
94574
94575 unsigned int iucv_sock_poll(struct file *file, struct socket *sock,
94576diff --git a/include/net/llc_c_ac.h b/include/net/llc_c_ac.h
94577index f3be818..bf46196 100644
94578--- a/include/net/llc_c_ac.h
94579+++ b/include/net/llc_c_ac.h
94580@@ -87,7 +87,7 @@
94581 #define LLC_CONN_AC_STOP_SENDACK_TMR 70
94582 #define LLC_CONN_AC_START_SENDACK_TMR_IF_NOT_RUNNING 71
94583
94584-typedef int (*llc_conn_action_t)(struct sock *sk, struct sk_buff *skb);
94585+typedef int (* const llc_conn_action_t)(struct sock *sk, struct sk_buff *skb);
94586
94587 int llc_conn_ac_clear_remote_busy(struct sock *sk, struct sk_buff *skb);
94588 int llc_conn_ac_conn_ind(struct sock *sk, struct sk_buff *skb);
94589diff --git a/include/net/llc_c_ev.h b/include/net/llc_c_ev.h
94590index 3948cf1..83b28c4 100644
94591--- a/include/net/llc_c_ev.h
94592+++ b/include/net/llc_c_ev.h
94593@@ -125,8 +125,8 @@ static __inline__ struct llc_conn_state_ev *llc_conn_ev(struct sk_buff *skb)
94594 return (struct llc_conn_state_ev *)skb->cb;
94595 }
94596
94597-typedef int (*llc_conn_ev_t)(struct sock *sk, struct sk_buff *skb);
94598-typedef int (*llc_conn_ev_qfyr_t)(struct sock *sk, struct sk_buff *skb);
94599+typedef int (* const llc_conn_ev_t)(struct sock *sk, struct sk_buff *skb);
94600+typedef int (* const llc_conn_ev_qfyr_t)(struct sock *sk, struct sk_buff *skb);
94601
94602 int llc_conn_ev_conn_req(struct sock *sk, struct sk_buff *skb);
94603 int llc_conn_ev_data_req(struct sock *sk, struct sk_buff *skb);
94604diff --git a/include/net/llc_c_st.h b/include/net/llc_c_st.h
94605index 48f3f89..0e92c50 100644
94606--- a/include/net/llc_c_st.h
94607+++ b/include/net/llc_c_st.h
94608@@ -37,7 +37,7 @@ struct llc_conn_state_trans {
94609 u8 next_state;
94610 const llc_conn_ev_qfyr_t *ev_qualifiers;
94611 const llc_conn_action_t *ev_actions;
94612-};
94613+} __do_const;
94614
94615 struct llc_conn_state {
94616 u8 current_state;
94617diff --git a/include/net/llc_s_ac.h b/include/net/llc_s_ac.h
94618index a61b98c..aade1eb 100644
94619--- a/include/net/llc_s_ac.h
94620+++ b/include/net/llc_s_ac.h
94621@@ -23,7 +23,7 @@
94622 #define SAP_ACT_TEST_IND 9
94623
94624 /* All action functions must look like this */
94625-typedef int (*llc_sap_action_t)(struct llc_sap *sap, struct sk_buff *skb);
94626+typedef int (* const llc_sap_action_t)(struct llc_sap *sap, struct sk_buff *skb);
94627
94628 int llc_sap_action_unitdata_ind(struct llc_sap *sap, struct sk_buff *skb);
94629 int llc_sap_action_send_ui(struct llc_sap *sap, struct sk_buff *skb);
94630diff --git a/include/net/llc_s_st.h b/include/net/llc_s_st.h
94631index c4359e2..76dbc4a 100644
94632--- a/include/net/llc_s_st.h
94633+++ b/include/net/llc_s_st.h
94634@@ -20,7 +20,7 @@ struct llc_sap_state_trans {
94635 llc_sap_ev_t ev;
94636 u8 next_state;
94637 const llc_sap_action_t *ev_actions;
94638-};
94639+} __do_const;
94640
94641 struct llc_sap_state {
94642 u8 curr_state;
94643diff --git a/include/net/mac80211.h b/include/net/mac80211.h
94644index d52914b..2b13cec 100644
94645--- a/include/net/mac80211.h
94646+++ b/include/net/mac80211.h
94647@@ -4915,7 +4915,7 @@ struct rate_control_ops {
94648 void (*remove_sta_debugfs)(void *priv, void *priv_sta);
94649
94650 u32 (*get_expected_throughput)(void *priv_sta);
94651-};
94652+} __do_const;
94653
94654 static inline int rate_supported(struct ieee80211_sta *sta,
94655 enum ieee80211_band band,
94656diff --git a/include/net/neighbour.h b/include/net/neighbour.h
94657index 76f7084..8f36e39 100644
94658--- a/include/net/neighbour.h
94659+++ b/include/net/neighbour.h
94660@@ -163,7 +163,7 @@ struct neigh_ops {
94661 void (*error_report)(struct neighbour *, struct sk_buff *);
94662 int (*output)(struct neighbour *, struct sk_buff *);
94663 int (*connected_output)(struct neighbour *, struct sk_buff *);
94664-};
94665+} __do_const;
94666
94667 struct pneigh_entry {
94668 struct pneigh_entry *next;
94669@@ -217,7 +217,7 @@ struct neigh_table {
94670 struct neigh_statistics __percpu *stats;
94671 struct neigh_hash_table __rcu *nht;
94672 struct pneigh_entry **phash_buckets;
94673-};
94674+} __randomize_layout;
94675
94676 enum {
94677 NEIGH_ARP_TABLE = 0,
94678diff --git a/include/net/net_namespace.h b/include/net/net_namespace.h
94679index 36faf49..6927638 100644
94680--- a/include/net/net_namespace.h
94681+++ b/include/net/net_namespace.h
94682@@ -131,8 +131,8 @@ struct net {
94683 struct netns_ipvs *ipvs;
94684 #endif
94685 struct sock *diag_nlsk;
94686- atomic_t fnhe_genid;
94687-};
94688+ atomic_unchecked_t fnhe_genid;
94689+} __randomize_layout;
94690
94691 #include <linux/seq_file_net.h>
94692
94693@@ -288,7 +288,11 @@ static inline struct net *read_pnet(struct net * const *pnet)
94694 #define __net_init __init
94695 #define __net_exit __exit_refok
94696 #define __net_initdata __initdata
94697+#ifdef CONSTIFY_PLUGIN
94698 #define __net_initconst __initconst
94699+#else
94700+#define __net_initconst __initdata
94701+#endif
94702 #endif
94703
94704 int peernet2id(struct net *net, struct net *peer);
94705@@ -301,7 +305,7 @@ struct pernet_operations {
94706 void (*exit_batch)(struct list_head *net_exit_list);
94707 int *id;
94708 size_t size;
94709-};
94710+} __do_const;
94711
94712 /*
94713 * Use these carefully. If you implement a network device and it
94714@@ -349,12 +353,12 @@ static inline void unregister_net_sysctl_table(struct ctl_table_header *header)
94715
94716 static inline int rt_genid_ipv4(struct net *net)
94717 {
94718- return atomic_read(&net->ipv4.rt_genid);
94719+ return atomic_read_unchecked(&net->ipv4.rt_genid);
94720 }
94721
94722 static inline void rt_genid_bump_ipv4(struct net *net)
94723 {
94724- atomic_inc(&net->ipv4.rt_genid);
94725+ atomic_inc_unchecked(&net->ipv4.rt_genid);
94726 }
94727
94728 extern void (*__fib6_flush_trees)(struct net *net);
94729@@ -381,12 +385,12 @@ static inline void rt_genid_bump_all(struct net *net)
94730
94731 static inline int fnhe_genid(struct net *net)
94732 {
94733- return atomic_read(&net->fnhe_genid);
94734+ return atomic_read_unchecked(&net->fnhe_genid);
94735 }
94736
94737 static inline void fnhe_genid_bump(struct net *net)
94738 {
94739- atomic_inc(&net->fnhe_genid);
94740+ atomic_inc_unchecked(&net->fnhe_genid);
94741 }
94742
94743 #endif /* __NET_NET_NAMESPACE_H */
94744diff --git a/include/net/netlink.h b/include/net/netlink.h
94745index e010ee8..405b9f4 100644
94746--- a/include/net/netlink.h
94747+++ b/include/net/netlink.h
94748@@ -518,7 +518,7 @@ static inline void nlmsg_trim(struct sk_buff *skb, const void *mark)
94749 {
94750 if (mark) {
94751 WARN_ON((unsigned char *) mark < skb->data);
94752- skb_trim(skb, (unsigned char *) mark - skb->data);
94753+ skb_trim(skb, (const unsigned char *) mark - skb->data);
94754 }
94755 }
94756
94757diff --git a/include/net/netns/conntrack.h b/include/net/netns/conntrack.h
94758index 29d6a94..235d3d84 100644
94759--- a/include/net/netns/conntrack.h
94760+++ b/include/net/netns/conntrack.h
94761@@ -14,10 +14,10 @@ struct nf_conntrack_ecache;
94762 struct nf_proto_net {
94763 #ifdef CONFIG_SYSCTL
94764 struct ctl_table_header *ctl_table_header;
94765- struct ctl_table *ctl_table;
94766+ ctl_table_no_const *ctl_table;
94767 #ifdef CONFIG_NF_CONNTRACK_PROC_COMPAT
94768 struct ctl_table_header *ctl_compat_header;
94769- struct ctl_table *ctl_compat_table;
94770+ ctl_table_no_const *ctl_compat_table;
94771 #endif
94772 #endif
94773 unsigned int users;
94774@@ -60,7 +60,7 @@ struct nf_ip_net {
94775 struct nf_icmp_net icmpv6;
94776 #if defined(CONFIG_SYSCTL) && defined(CONFIG_NF_CONNTRACK_PROC_COMPAT)
94777 struct ctl_table_header *ctl_table_header;
94778- struct ctl_table *ctl_table;
94779+ ctl_table_no_const *ctl_table;
94780 #endif
94781 };
94782
94783diff --git a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h
94784index dbe2254..ed0c151 100644
94785--- a/include/net/netns/ipv4.h
94786+++ b/include/net/netns/ipv4.h
94787@@ -87,7 +87,7 @@ struct netns_ipv4 {
94788
94789 struct ping_group_range ping_group_range;
94790
94791- atomic_t dev_addr_genid;
94792+ atomic_unchecked_t dev_addr_genid;
94793
94794 #ifdef CONFIG_SYSCTL
94795 unsigned long *sysctl_local_reserved_ports;
94796@@ -101,6 +101,6 @@ struct netns_ipv4 {
94797 struct fib_rules_ops *mr_rules_ops;
94798 #endif
94799 #endif
94800- atomic_t rt_genid;
94801+ atomic_unchecked_t rt_genid;
94802 };
94803 #endif
94804diff --git a/include/net/netns/ipv6.h b/include/net/netns/ipv6.h
94805index 69ae41f..4f94868 100644
94806--- a/include/net/netns/ipv6.h
94807+++ b/include/net/netns/ipv6.h
94808@@ -75,8 +75,8 @@ struct netns_ipv6 {
94809 struct fib_rules_ops *mr6_rules_ops;
94810 #endif
94811 #endif
94812- atomic_t dev_addr_genid;
94813- atomic_t fib6_sernum;
94814+ atomic_unchecked_t dev_addr_genid;
94815+ atomic_unchecked_t fib6_sernum;
94816 };
94817
94818 #if IS_ENABLED(CONFIG_NF_DEFRAG_IPV6)
94819diff --git a/include/net/netns/xfrm.h b/include/net/netns/xfrm.h
94820index 730d82a..045f2c4 100644
94821--- a/include/net/netns/xfrm.h
94822+++ b/include/net/netns/xfrm.h
94823@@ -78,7 +78,7 @@ struct netns_xfrm {
94824
94825 /* flow cache part */
94826 struct flow_cache flow_cache_global;
94827- atomic_t flow_cache_genid;
94828+ atomic_unchecked_t flow_cache_genid;
94829 struct list_head flow_cache_gc_list;
94830 spinlock_t flow_cache_gc_lock;
94831 struct work_struct flow_cache_gc_work;
94832diff --git a/include/net/ping.h b/include/net/ping.h
94833index cc16d41..664f40b 100644
94834--- a/include/net/ping.h
94835+++ b/include/net/ping.h
94836@@ -54,7 +54,7 @@ struct ping_iter_state {
94837
94838 extern struct proto ping_prot;
94839 #if IS_ENABLED(CONFIG_IPV6)
94840-extern struct pingv6_ops pingv6_ops;
94841+extern struct pingv6_ops *pingv6_ops;
94842 #endif
94843
94844 struct pingfakehdr {
94845diff --git a/include/net/protocol.h b/include/net/protocol.h
94846index d6fcc1f..ca277058 100644
94847--- a/include/net/protocol.h
94848+++ b/include/net/protocol.h
94849@@ -49,7 +49,7 @@ struct net_protocol {
94850 * socket lookup?
94851 */
94852 icmp_strict_tag_validation:1;
94853-};
94854+} __do_const;
94855
94856 #if IS_ENABLED(CONFIG_IPV6)
94857 struct inet6_protocol {
94858@@ -62,7 +62,7 @@ struct inet6_protocol {
94859 u8 type, u8 code, int offset,
94860 __be32 info);
94861 unsigned int flags; /* INET6_PROTO_xxx */
94862-};
94863+} __do_const;
94864
94865 #define INET6_PROTO_NOPOLICY 0x1
94866 #define INET6_PROTO_FINAL 0x2
94867diff --git a/include/net/rtnetlink.h b/include/net/rtnetlink.h
94868index 6c6d539..af70817 100644
94869--- a/include/net/rtnetlink.h
94870+++ b/include/net/rtnetlink.h
94871@@ -95,7 +95,7 @@ struct rtnl_link_ops {
94872 const struct net_device *dev,
94873 const struct net_device *slave_dev);
94874 struct net *(*get_link_net)(const struct net_device *dev);
94875-};
94876+} __do_const;
94877
94878 int __rtnl_link_register(struct rtnl_link_ops *ops);
94879 void __rtnl_link_unregister(struct rtnl_link_ops *ops);
94880diff --git a/include/net/sctp/checksum.h b/include/net/sctp/checksum.h
94881index 4a5b9a3..ca27d73 100644
94882--- a/include/net/sctp/checksum.h
94883+++ b/include/net/sctp/checksum.h
94884@@ -61,8 +61,8 @@ static inline __le32 sctp_compute_cksum(const struct sk_buff *skb,
94885 unsigned int offset)
94886 {
94887 struct sctphdr *sh = sctp_hdr(skb);
94888- __le32 ret, old = sh->checksum;
94889- const struct skb_checksum_ops ops = {
94890+ __le32 ret, old = sh->checksum;
94891+ static const struct skb_checksum_ops ops = {
94892 .update = sctp_csum_update,
94893 .combine = sctp_csum_combine,
94894 };
94895diff --git a/include/net/sctp/sm.h b/include/net/sctp/sm.h
94896index 487ef34..d457f98 100644
94897--- a/include/net/sctp/sm.h
94898+++ b/include/net/sctp/sm.h
94899@@ -80,7 +80,7 @@ typedef void (sctp_timer_event_t) (unsigned long);
94900 typedef struct {
94901 sctp_state_fn_t *fn;
94902 const char *name;
94903-} sctp_sm_table_entry_t;
94904+} __do_const sctp_sm_table_entry_t;
94905
94906 /* A naming convention of "sctp_sf_xxx" applies to all the state functions
94907 * currently in use.
94908@@ -292,7 +292,7 @@ __u32 sctp_generate_tag(const struct sctp_endpoint *);
94909 __u32 sctp_generate_tsn(const struct sctp_endpoint *);
94910
94911 /* Extern declarations for major data structures. */
94912-extern sctp_timer_event_t *sctp_timer_events[SCTP_NUM_TIMEOUT_TYPES];
94913+extern sctp_timer_event_t * const sctp_timer_events[SCTP_NUM_TIMEOUT_TYPES];
94914
94915
94916 /* Get the size of a DATA chunk payload. */
94917diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h
94918index 2bb2fcf..d17c291 100644
94919--- a/include/net/sctp/structs.h
94920+++ b/include/net/sctp/structs.h
94921@@ -509,7 +509,7 @@ struct sctp_pf {
94922 void (*to_sk_saddr)(union sctp_addr *, struct sock *sk);
94923 void (*to_sk_daddr)(union sctp_addr *, struct sock *sk);
94924 struct sctp_af *af;
94925-};
94926+} __do_const;
94927
94928
94929 /* Structure to track chunk fragments that have been acked, but peer
94930diff --git a/include/net/sock.h b/include/net/sock.h
94931index e4079c2..79c5d3a 100644
94932--- a/include/net/sock.h
94933+++ b/include/net/sock.h
94934@@ -362,7 +362,7 @@ struct sock {
94935 unsigned int sk_napi_id;
94936 unsigned int sk_ll_usec;
94937 #endif
94938- atomic_t sk_drops;
94939+ atomic_unchecked_t sk_drops;
94940 int sk_rcvbuf;
94941
94942 struct sk_filter __rcu *sk_filter;
94943@@ -1039,7 +1039,7 @@ struct proto {
94944 void (*destroy_cgroup)(struct mem_cgroup *memcg);
94945 struct cg_proto *(*proto_cgroup)(struct mem_cgroup *memcg);
94946 #endif
94947-};
94948+} __randomize_layout;
94949
94950 /*
94951 * Bits in struct cg_proto.flags
94952@@ -1212,7 +1212,7 @@ static inline void memcg_memory_allocated_sub(struct cg_proto *prot,
94953 page_counter_uncharge(&prot->memory_allocated, amt);
94954 }
94955
94956-static inline long
94957+static inline long __intentional_overflow(-1)
94958 sk_memory_allocated(const struct sock *sk)
94959 {
94960 struct proto *prot = sk->sk_prot;
94961@@ -1778,7 +1778,7 @@ static inline void sk_nocaps_add(struct sock *sk, netdev_features_t flags)
94962 }
94963
94964 static inline int skb_do_copy_data_nocache(struct sock *sk, struct sk_buff *skb,
94965- struct iov_iter *from, char *to,
94966+ struct iov_iter *from, unsigned char *to,
94967 int copy, int offset)
94968 {
94969 if (skb->ip_summed == CHECKSUM_NONE) {
94970@@ -2025,7 +2025,7 @@ static inline void sk_stream_moderate_sndbuf(struct sock *sk)
94971 }
94972 }
94973
94974-struct sk_buff *sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp);
94975+struct sk_buff * __intentional_overflow(0) sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp);
94976
94977 /**
94978 * sk_page_frag - return an appropriate page_frag
94979diff --git a/include/net/tcp.h b/include/net/tcp.h
94980index 8d6b983..5813205 100644
94981--- a/include/net/tcp.h
94982+++ b/include/net/tcp.h
94983@@ -516,7 +516,7 @@ void tcp_retransmit_timer(struct sock *sk);
94984 void tcp_xmit_retransmit_queue(struct sock *);
94985 void tcp_simple_retransmit(struct sock *);
94986 int tcp_trim_head(struct sock *, struct sk_buff *, u32);
94987-int tcp_fragment(struct sock *, struct sk_buff *, u32, unsigned int, gfp_t);
94988+int __intentional_overflow(3) tcp_fragment(struct sock *, struct sk_buff *, u32, unsigned int, gfp_t);
94989
94990 void tcp_send_probe0(struct sock *);
94991 void tcp_send_partial(struct sock *);
94992@@ -694,8 +694,8 @@ static inline u32 tcp_skb_timestamp(const struct sk_buff *skb)
94993 * If this grows please adjust skbuff.h:skbuff->cb[xxx] size appropriately.
94994 */
94995 struct tcp_skb_cb {
94996- __u32 seq; /* Starting sequence number */
94997- __u32 end_seq; /* SEQ + FIN + SYN + datalen */
94998+ __u32 seq __intentional_overflow(0); /* Starting sequence number */
94999+ __u32 end_seq __intentional_overflow(0); /* SEQ + FIN + SYN + datalen */
95000 union {
95001 /* Note : tcp_tw_isn is used in input path only
95002 * (isn chosen by tcp_timewait_state_process())
95003@@ -720,7 +720,7 @@ struct tcp_skb_cb {
95004
95005 __u8 ip_dsfield; /* IPv4 tos or IPv6 dsfield */
95006 /* 1 byte hole */
95007- __u32 ack_seq; /* Sequence number ACK'd */
95008+ __u32 ack_seq __intentional_overflow(0); /* Sequence number ACK'd */
95009 union {
95010 struct inet_skb_parm h4;
95011 #if IS_ENABLED(CONFIG_IPV6)
95012diff --git a/include/net/xfrm.h b/include/net/xfrm.h
95013index dc4865e..152ee4c 100644
95014--- a/include/net/xfrm.h
95015+++ b/include/net/xfrm.h
95016@@ -285,7 +285,6 @@ struct xfrm_dst;
95017 struct xfrm_policy_afinfo {
95018 unsigned short family;
95019 struct dst_ops *dst_ops;
95020- void (*garbage_collect)(struct net *net);
95021 struct dst_entry *(*dst_lookup)(struct net *net, int tos,
95022 const xfrm_address_t *saddr,
95023 const xfrm_address_t *daddr);
95024@@ -303,7 +302,7 @@ struct xfrm_policy_afinfo {
95025 struct net_device *dev,
95026 const struct flowi *fl);
95027 struct dst_entry *(*blackhole_route)(struct net *net, struct dst_entry *orig);
95028-};
95029+} __do_const;
95030
95031 int xfrm_policy_register_afinfo(struct xfrm_policy_afinfo *afinfo);
95032 int xfrm_policy_unregister_afinfo(struct xfrm_policy_afinfo *afinfo);
95033@@ -342,7 +341,7 @@ struct xfrm_state_afinfo {
95034 int (*transport_finish)(struct sk_buff *skb,
95035 int async);
95036 void (*local_error)(struct sk_buff *skb, u32 mtu);
95037-};
95038+} __do_const;
95039
95040 int xfrm_state_register_afinfo(struct xfrm_state_afinfo *afinfo);
95041 int xfrm_state_unregister_afinfo(struct xfrm_state_afinfo *afinfo);
95042@@ -437,7 +436,7 @@ struct xfrm_mode {
95043 struct module *owner;
95044 unsigned int encap;
95045 int flags;
95046-};
95047+} __do_const;
95048
95049 /* Flags for xfrm_mode. */
95050 enum {
95051@@ -534,7 +533,7 @@ struct xfrm_policy {
95052 struct timer_list timer;
95053
95054 struct flow_cache_object flo;
95055- atomic_t genid;
95056+ atomic_unchecked_t genid;
95057 u32 priority;
95058 u32 index;
95059 struct xfrm_mark mark;
95060@@ -1167,6 +1166,7 @@ static inline void xfrm_sk_free_policy(struct sock *sk)
95061 }
95062
95063 void xfrm_garbage_collect(struct net *net);
95064+void xfrm_garbage_collect_deferred(struct net *net);
95065
95066 #else
95067
95068@@ -1205,6 +1205,9 @@ static inline int xfrm6_policy_check_reverse(struct sock *sk, int dir,
95069 static inline void xfrm_garbage_collect(struct net *net)
95070 {
95071 }
95072+static inline void xfrm_garbage_collect_deferred(struct net *net)
95073+{
95074+}
95075 #endif
95076
95077 static __inline__
95078diff --git a/include/rdma/iw_cm.h b/include/rdma/iw_cm.h
95079index 1017e0b..227aa4d 100644
95080--- a/include/rdma/iw_cm.h
95081+++ b/include/rdma/iw_cm.h
95082@@ -122,7 +122,7 @@ struct iw_cm_verbs {
95083 int backlog);
95084
95085 int (*destroy_listen)(struct iw_cm_id *cm_id);
95086-};
95087+} __no_const;
95088
95089 /**
95090 * iw_create_cm_id - Create an IW CM identifier.
95091diff --git a/include/scsi/libfc.h b/include/scsi/libfc.h
95092index 93d14da..734b3d8 100644
95093--- a/include/scsi/libfc.h
95094+++ b/include/scsi/libfc.h
95095@@ -771,6 +771,7 @@ struct libfc_function_template {
95096 */
95097 void (*disc_stop_final) (struct fc_lport *);
95098 };
95099+typedef struct libfc_function_template __no_const libfc_function_template_no_const;
95100
95101 /**
95102 * struct fc_disc - Discovery context
95103@@ -875,7 +876,7 @@ struct fc_lport {
95104 struct fc_vport *vport;
95105
95106 /* Operational Information */
95107- struct libfc_function_template tt;
95108+ libfc_function_template_no_const tt;
95109 u8 link_up;
95110 u8 qfull;
95111 enum fc_lport_state state;
95112diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h
95113index a4c9336..d6f8f34 100644
95114--- a/include/scsi/scsi_device.h
95115+++ b/include/scsi/scsi_device.h
95116@@ -185,9 +185,9 @@ struct scsi_device {
95117 unsigned int max_device_blocked; /* what device_blocked counts down from */
95118 #define SCSI_DEFAULT_DEVICE_BLOCKED 3
95119
95120- atomic_t iorequest_cnt;
95121- atomic_t iodone_cnt;
95122- atomic_t ioerr_cnt;
95123+ atomic_unchecked_t iorequest_cnt;
95124+ atomic_unchecked_t iodone_cnt;
95125+ atomic_unchecked_t ioerr_cnt;
95126
95127 struct device sdev_gendev,
95128 sdev_dev;
95129diff --git a/include/scsi/scsi_transport_fc.h b/include/scsi/scsi_transport_fc.h
95130index 007a0bc..7188db8 100644
95131--- a/include/scsi/scsi_transport_fc.h
95132+++ b/include/scsi/scsi_transport_fc.h
95133@@ -756,7 +756,8 @@ struct fc_function_template {
95134 unsigned long show_host_system_hostname:1;
95135
95136 unsigned long disable_target_scan:1;
95137-};
95138+} __do_const;
95139+typedef struct fc_function_template __no_const fc_function_template_no_const;
95140
95141
95142 /**
95143diff --git a/include/sound/compress_driver.h b/include/sound/compress_driver.h
95144index f48089d..73abe48 100644
95145--- a/include/sound/compress_driver.h
95146+++ b/include/sound/compress_driver.h
95147@@ -130,7 +130,7 @@ struct snd_compr_ops {
95148 struct snd_compr_caps *caps);
95149 int (*get_codec_caps) (struct snd_compr_stream *stream,
95150 struct snd_compr_codec_caps *codec);
95151-};
95152+} __no_const;
95153
95154 /**
95155 * struct snd_compr: Compressed device
95156diff --git a/include/sound/soc.h b/include/sound/soc.h
95157index 0d1ade1..34e77d3 100644
95158--- a/include/sound/soc.h
95159+++ b/include/sound/soc.h
95160@@ -856,7 +856,7 @@ struct snd_soc_codec_driver {
95161 enum snd_soc_dapm_type, int);
95162
95163 bool ignore_pmdown_time; /* Doesn't benefit from pmdown delay */
95164-};
95165+} __do_const;
95166
95167 /* SoC platform interface */
95168 struct snd_soc_platform_driver {
95169@@ -883,7 +883,7 @@ struct snd_soc_platform_driver {
95170 const struct snd_compr_ops *compr_ops;
95171
95172 int (*bespoke_trigger)(struct snd_pcm_substream *, int);
95173-};
95174+} __do_const;
95175
95176 struct snd_soc_dai_link_component {
95177 const char *name;
95178diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
95179index 985ca4c..b55b54a 100644
95180--- a/include/target/target_core_base.h
95181+++ b/include/target/target_core_base.h
95182@@ -767,7 +767,7 @@ struct se_device {
95183 atomic_long_t write_bytes;
95184 /* Active commands on this virtual SE device */
95185 atomic_t simple_cmds;
95186- atomic_t dev_ordered_id;
95187+ atomic_unchecked_t dev_ordered_id;
95188 atomic_t dev_ordered_sync;
95189 atomic_t dev_qf_count;
95190 int export_count;
95191diff --git a/include/trace/events/fs.h b/include/trace/events/fs.h
95192new file mode 100644
95193index 0000000..fb634b7
95194--- /dev/null
95195+++ b/include/trace/events/fs.h
95196@@ -0,0 +1,53 @@
95197+#undef TRACE_SYSTEM
95198+#define TRACE_SYSTEM fs
95199+
95200+#if !defined(_TRACE_FS_H) || defined(TRACE_HEADER_MULTI_READ)
95201+#define _TRACE_FS_H
95202+
95203+#include <linux/fs.h>
95204+#include <linux/tracepoint.h>
95205+
95206+TRACE_EVENT(do_sys_open,
95207+
95208+ TP_PROTO(const char *filename, int flags, int mode),
95209+
95210+ TP_ARGS(filename, flags, mode),
95211+
95212+ TP_STRUCT__entry(
95213+ __string( filename, filename )
95214+ __field( int, flags )
95215+ __field( int, mode )
95216+ ),
95217+
95218+ TP_fast_assign(
95219+ __assign_str(filename, filename);
95220+ __entry->flags = flags;
95221+ __entry->mode = mode;
95222+ ),
95223+
95224+ TP_printk("\"%s\" %x %o",
95225+ __get_str(filename), __entry->flags, __entry->mode)
95226+);
95227+
95228+TRACE_EVENT(open_exec,
95229+
95230+ TP_PROTO(const char *filename),
95231+
95232+ TP_ARGS(filename),
95233+
95234+ TP_STRUCT__entry(
95235+ __string( filename, filename )
95236+ ),
95237+
95238+ TP_fast_assign(
95239+ __assign_str(filename, filename);
95240+ ),
95241+
95242+ TP_printk("\"%s\"",
95243+ __get_str(filename))
95244+);
95245+
95246+#endif /* _TRACE_FS_H */
95247+
95248+/* This part must be outside protection */
95249+#include <trace/define_trace.h>
95250diff --git a/include/trace/events/irq.h b/include/trace/events/irq.h
95251index 3608beb..df39d8a 100644
95252--- a/include/trace/events/irq.h
95253+++ b/include/trace/events/irq.h
95254@@ -36,7 +36,7 @@ struct softirq_action;
95255 */
95256 TRACE_EVENT(irq_handler_entry,
95257
95258- TP_PROTO(int irq, struct irqaction *action),
95259+ TP_PROTO(int irq, const struct irqaction *action),
95260
95261 TP_ARGS(irq, action),
95262
95263@@ -66,7 +66,7 @@ TRACE_EVENT(irq_handler_entry,
95264 */
95265 TRACE_EVENT(irq_handler_exit,
95266
95267- TP_PROTO(int irq, struct irqaction *action, int ret),
95268+ TP_PROTO(int irq, const struct irqaction *action, int ret),
95269
95270 TP_ARGS(irq, action, ret),
95271
95272diff --git a/include/uapi/drm/i915_drm.h b/include/uapi/drm/i915_drm.h
95273index 6eed16b..3e05750 100644
95274--- a/include/uapi/drm/i915_drm.h
95275+++ b/include/uapi/drm/i915_drm.h
95276@@ -347,6 +347,7 @@ typedef struct drm_i915_irq_wait {
95277 #define I915_PARAM_HAS_COHERENT_PHYS_GTT 29
95278 #define I915_PARAM_MMAP_VERSION 30
95279 #define I915_PARAM_HAS_BSD2 31
95280+#define I915_PARAM_HAS_LEGACY_CONTEXT 35
95281
95282 typedef struct drm_i915_getparam {
95283 int param;
95284diff --git a/include/uapi/linux/a.out.h b/include/uapi/linux/a.out.h
95285index 7caf44c..23c6f27 100644
95286--- a/include/uapi/linux/a.out.h
95287+++ b/include/uapi/linux/a.out.h
95288@@ -39,6 +39,14 @@ enum machine_type {
95289 M_MIPS2 = 152 /* MIPS R6000/R4000 binary */
95290 };
95291
95292+/* Constants for the N_FLAGS field */
95293+#define F_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
95294+#define F_PAX_EMUTRAMP 2 /* Emulate trampolines */
95295+#define F_PAX_MPROTECT 4 /* Restrict mprotect() */
95296+#define F_PAX_RANDMMAP 8 /* Randomize mmap() base */
95297+/*#define F_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
95298+#define F_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
95299+
95300 #if !defined (N_MAGIC)
95301 #define N_MAGIC(exec) ((exec).a_info & 0xffff)
95302 #endif
95303diff --git a/include/uapi/linux/bcache.h b/include/uapi/linux/bcache.h
95304index 22b6ad3..aeba37e 100644
95305--- a/include/uapi/linux/bcache.h
95306+++ b/include/uapi/linux/bcache.h
95307@@ -5,6 +5,7 @@
95308 * Bcache on disk data structures
95309 */
95310
95311+#include <linux/compiler.h>
95312 #include <asm/types.h>
95313
95314 #define BITMASK(name, type, field, offset, size) \
95315@@ -20,8 +21,8 @@ static inline void SET_##name(type *k, __u64 v) \
95316 /* Btree keys - all units are in sectors */
95317
95318 struct bkey {
95319- __u64 high;
95320- __u64 low;
95321+ __u64 high __intentional_overflow(-1);
95322+ __u64 low __intentional_overflow(-1);
95323 __u64 ptr[];
95324 };
95325
95326diff --git a/include/uapi/linux/byteorder/little_endian.h b/include/uapi/linux/byteorder/little_endian.h
95327index d876736..ccce5c0 100644
95328--- a/include/uapi/linux/byteorder/little_endian.h
95329+++ b/include/uapi/linux/byteorder/little_endian.h
95330@@ -42,51 +42,51 @@
95331
95332 static inline __le64 __cpu_to_le64p(const __u64 *p)
95333 {
95334- return (__force __le64)*p;
95335+ return (__force const __le64)*p;
95336 }
95337-static inline __u64 __le64_to_cpup(const __le64 *p)
95338+static inline __u64 __intentional_overflow(-1) __le64_to_cpup(const __le64 *p)
95339 {
95340- return (__force __u64)*p;
95341+ return (__force const __u64)*p;
95342 }
95343 static inline __le32 __cpu_to_le32p(const __u32 *p)
95344 {
95345- return (__force __le32)*p;
95346+ return (__force const __le32)*p;
95347 }
95348 static inline __u32 __le32_to_cpup(const __le32 *p)
95349 {
95350- return (__force __u32)*p;
95351+ return (__force const __u32)*p;
95352 }
95353 static inline __le16 __cpu_to_le16p(const __u16 *p)
95354 {
95355- return (__force __le16)*p;
95356+ return (__force const __le16)*p;
95357 }
95358 static inline __u16 __le16_to_cpup(const __le16 *p)
95359 {
95360- return (__force __u16)*p;
95361+ return (__force const __u16)*p;
95362 }
95363 static inline __be64 __cpu_to_be64p(const __u64 *p)
95364 {
95365- return (__force __be64)__swab64p(p);
95366+ return (__force const __be64)__swab64p(p);
95367 }
95368 static inline __u64 __be64_to_cpup(const __be64 *p)
95369 {
95370- return __swab64p((__u64 *)p);
95371+ return __swab64p((const __u64 *)p);
95372 }
95373 static inline __be32 __cpu_to_be32p(const __u32 *p)
95374 {
95375- return (__force __be32)__swab32p(p);
95376+ return (__force const __be32)__swab32p(p);
95377 }
95378-static inline __u32 __be32_to_cpup(const __be32 *p)
95379+static inline __u32 __intentional_overflow(-1) __be32_to_cpup(const __be32 *p)
95380 {
95381- return __swab32p((__u32 *)p);
95382+ return __swab32p((const __u32 *)p);
95383 }
95384 static inline __be16 __cpu_to_be16p(const __u16 *p)
95385 {
95386- return (__force __be16)__swab16p(p);
95387+ return (__force const __be16)__swab16p(p);
95388 }
95389 static inline __u16 __be16_to_cpup(const __be16 *p)
95390 {
95391- return __swab16p((__u16 *)p);
95392+ return __swab16p((const __u16 *)p);
95393 }
95394 #define __cpu_to_le64s(x) do { (void)(x); } while (0)
95395 #define __le64_to_cpus(x) do { (void)(x); } while (0)
95396diff --git a/include/uapi/linux/elf.h b/include/uapi/linux/elf.h
95397index 71e1d0e..6cc9caf 100644
95398--- a/include/uapi/linux/elf.h
95399+++ b/include/uapi/linux/elf.h
95400@@ -37,6 +37,17 @@ typedef __s64 Elf64_Sxword;
95401 #define PT_GNU_EH_FRAME 0x6474e550
95402
95403 #define PT_GNU_STACK (PT_LOOS + 0x474e551)
95404+#define PT_GNU_RELRO (PT_LOOS + 0x474e552)
95405+
95406+#define PT_PAX_FLAGS (PT_LOOS + 0x5041580)
95407+
95408+/* Constants for the e_flags field */
95409+#define EF_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
95410+#define EF_PAX_EMUTRAMP 2 /* Emulate trampolines */
95411+#define EF_PAX_MPROTECT 4 /* Restrict mprotect() */
95412+#define EF_PAX_RANDMMAP 8 /* Randomize mmap() base */
95413+/*#define EF_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
95414+#define EF_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
95415
95416 /*
95417 * Extended Numbering
95418@@ -94,6 +105,8 @@ typedef __s64 Elf64_Sxword;
95419 #define DT_DEBUG 21
95420 #define DT_TEXTREL 22
95421 #define DT_JMPREL 23
95422+#define DT_FLAGS 30
95423+ #define DF_TEXTREL 0x00000004
95424 #define DT_ENCODING 32
95425 #define OLD_DT_LOOS 0x60000000
95426 #define DT_LOOS 0x6000000d
95427@@ -240,6 +253,19 @@ typedef struct elf64_hdr {
95428 #define PF_W 0x2
95429 #define PF_X 0x1
95430
95431+#define PF_PAGEEXEC (1U << 4) /* Enable PAGEEXEC */
95432+#define PF_NOPAGEEXEC (1U << 5) /* Disable PAGEEXEC */
95433+#define PF_SEGMEXEC (1U << 6) /* Enable SEGMEXEC */
95434+#define PF_NOSEGMEXEC (1U << 7) /* Disable SEGMEXEC */
95435+#define PF_MPROTECT (1U << 8) /* Enable MPROTECT */
95436+#define PF_NOMPROTECT (1U << 9) /* Disable MPROTECT */
95437+/*#define PF_RANDEXEC (1U << 10)*/ /* Enable RANDEXEC */
95438+/*#define PF_NORANDEXEC (1U << 11)*/ /* Disable RANDEXEC */
95439+#define PF_EMUTRAMP (1U << 12) /* Enable EMUTRAMP */
95440+#define PF_NOEMUTRAMP (1U << 13) /* Disable EMUTRAMP */
95441+#define PF_RANDMMAP (1U << 14) /* Enable RANDMMAP */
95442+#define PF_NORANDMMAP (1U << 15) /* Disable RANDMMAP */
95443+
95444 typedef struct elf32_phdr{
95445 Elf32_Word p_type;
95446 Elf32_Off p_offset;
95447@@ -332,6 +358,8 @@ typedef struct elf64_shdr {
95448 #define EI_OSABI 7
95449 #define EI_PAD 8
95450
95451+#define EI_PAX 14
95452+
95453 #define ELFMAG0 0x7f /* EI_MAG */
95454 #define ELFMAG1 'E'
95455 #define ELFMAG2 'L'
95456diff --git a/include/uapi/linux/personality.h b/include/uapi/linux/personality.h
95457index aa169c4..6a2771d 100644
95458--- a/include/uapi/linux/personality.h
95459+++ b/include/uapi/linux/personality.h
95460@@ -30,6 +30,7 @@ enum {
95461 #define PER_CLEAR_ON_SETID (READ_IMPLIES_EXEC | \
95462 ADDR_NO_RANDOMIZE | \
95463 ADDR_COMPAT_LAYOUT | \
95464+ ADDR_LIMIT_3GB | \
95465 MMAP_PAGE_ZERO)
95466
95467 /*
95468diff --git a/include/uapi/linux/screen_info.h b/include/uapi/linux/screen_info.h
95469index 7530e74..e714828 100644
95470--- a/include/uapi/linux/screen_info.h
95471+++ b/include/uapi/linux/screen_info.h
95472@@ -43,7 +43,8 @@ struct screen_info {
95473 __u16 pages; /* 0x32 */
95474 __u16 vesa_attributes; /* 0x34 */
95475 __u32 capabilities; /* 0x36 */
95476- __u8 _reserved[6]; /* 0x3a */
95477+ __u16 vesapm_size; /* 0x3a */
95478+ __u8 _reserved[4]; /* 0x3c */
95479 } __attribute__((packed));
95480
95481 #define VIDEO_TYPE_MDA 0x10 /* Monochrome Text Display */
95482diff --git a/include/uapi/linux/swab.h b/include/uapi/linux/swab.h
95483index 0e011eb..82681b1 100644
95484--- a/include/uapi/linux/swab.h
95485+++ b/include/uapi/linux/swab.h
95486@@ -43,7 +43,7 @@
95487 * ___swab16, ___swab32, ___swab64, ___swahw32, ___swahb32
95488 */
95489
95490-static inline __attribute_const__ __u16 __fswab16(__u16 val)
95491+static inline __intentional_overflow(-1) __attribute_const__ __u16 __fswab16(__u16 val)
95492 {
95493 #ifdef __HAVE_BUILTIN_BSWAP16__
95494 return __builtin_bswap16(val);
95495@@ -54,7 +54,7 @@ static inline __attribute_const__ __u16 __fswab16(__u16 val)
95496 #endif
95497 }
95498
95499-static inline __attribute_const__ __u32 __fswab32(__u32 val)
95500+static inline __intentional_overflow(-1) __attribute_const__ __u32 __fswab32(__u32 val)
95501 {
95502 #ifdef __HAVE_BUILTIN_BSWAP32__
95503 return __builtin_bswap32(val);
95504@@ -65,7 +65,7 @@ static inline __attribute_const__ __u32 __fswab32(__u32 val)
95505 #endif
95506 }
95507
95508-static inline __attribute_const__ __u64 __fswab64(__u64 val)
95509+static inline __intentional_overflow(-1) __attribute_const__ __u64 __fswab64(__u64 val)
95510 {
95511 #ifdef __HAVE_BUILTIN_BSWAP64__
95512 return __builtin_bswap64(val);
95513diff --git a/include/uapi/linux/xattr.h b/include/uapi/linux/xattr.h
95514index 1590c49..5eab462 100644
95515--- a/include/uapi/linux/xattr.h
95516+++ b/include/uapi/linux/xattr.h
95517@@ -73,5 +73,9 @@
95518 #define XATTR_POSIX_ACL_DEFAULT "posix_acl_default"
95519 #define XATTR_NAME_POSIX_ACL_DEFAULT XATTR_SYSTEM_PREFIX XATTR_POSIX_ACL_DEFAULT
95520
95521+/* User namespace */
95522+#define XATTR_PAX_PREFIX XATTR_USER_PREFIX "pax."
95523+#define XATTR_PAX_FLAGS_SUFFIX "flags"
95524+#define XATTR_NAME_PAX_FLAGS XATTR_PAX_PREFIX XATTR_PAX_FLAGS_SUFFIX
95525
95526 #endif /* _UAPI_LINUX_XATTR_H */
95527diff --git a/include/video/udlfb.h b/include/video/udlfb.h
95528index f9466fa..f4e2b81 100644
95529--- a/include/video/udlfb.h
95530+++ b/include/video/udlfb.h
95531@@ -53,10 +53,10 @@ struct dlfb_data {
95532 u32 pseudo_palette[256];
95533 int blank_mode; /*one of FB_BLANK_ */
95534 /* blit-only rendering path metrics, exposed through sysfs */
95535- atomic_t bytes_rendered; /* raw pixel-bytes driver asked to render */
95536- atomic_t bytes_identical; /* saved effort with backbuffer comparison */
95537- atomic_t bytes_sent; /* to usb, after compression including overhead */
95538- atomic_t cpu_kcycles_used; /* transpired during pixel processing */
95539+ atomic_unchecked_t bytes_rendered; /* raw pixel-bytes driver asked to render */
95540+ atomic_unchecked_t bytes_identical; /* saved effort with backbuffer comparison */
95541+ atomic_unchecked_t bytes_sent; /* to usb, after compression including overhead */
95542+ atomic_unchecked_t cpu_kcycles_used; /* transpired during pixel processing */
95543 };
95544
95545 #define NR_USB_REQUEST_I2C_SUB_IO 0x02
95546diff --git a/include/video/uvesafb.h b/include/video/uvesafb.h
95547index 30f5362..8ed8ac9 100644
95548--- a/include/video/uvesafb.h
95549+++ b/include/video/uvesafb.h
95550@@ -122,6 +122,7 @@ struct uvesafb_par {
95551 u8 ypan; /* 0 - nothing, 1 - ypan, 2 - ywrap */
95552 u8 pmi_setpal; /* PMI for palette changes */
95553 u16 *pmi_base; /* protected mode interface location */
95554+ u8 *pmi_code; /* protected mode code location */
95555 void *pmi_start;
95556 void *pmi_pal;
95557 u8 *vbe_state_orig; /*
95558diff --git a/init/Kconfig b/init/Kconfig
95559index f5dbc6d..8259396 100644
95560--- a/init/Kconfig
95561+++ b/init/Kconfig
95562@@ -1136,6 +1136,7 @@ endif # CGROUPS
95563
95564 config CHECKPOINT_RESTORE
95565 bool "Checkpoint/restore support" if EXPERT
95566+ depends on !GRKERNSEC
95567 default n
95568 help
95569 Enables additional kernel features in a sake of checkpoint/restore.
95570@@ -1646,7 +1647,7 @@ config SLUB_DEBUG
95571
95572 config COMPAT_BRK
95573 bool "Disable heap randomization"
95574- default y
95575+ default n
95576 help
95577 Randomizing heap placement makes heap exploits harder, but it
95578 also breaks ancient binaries (including anything libc5 based).
95579@@ -1977,7 +1978,7 @@ config INIT_ALL_POSSIBLE
95580 config STOP_MACHINE
95581 bool
95582 default y
95583- depends on (SMP && MODULE_UNLOAD) || HOTPLUG_CPU
95584+ depends on (SMP && MODULE_UNLOAD) || HOTPLUG_CPU || GRKERNSEC
95585 help
95586 Need stop_machine() primitive.
95587
95588diff --git a/init/Makefile b/init/Makefile
95589index 7bc47ee..6da2dc7 100644
95590--- a/init/Makefile
95591+++ b/init/Makefile
95592@@ -2,6 +2,9 @@
95593 # Makefile for the linux kernel.
95594 #
95595
95596+ccflags-y := $(GCC_PLUGINS_CFLAGS)
95597+asflags-y := $(GCC_PLUGINS_AFLAGS)
95598+
95599 obj-y := main.o version.o mounts.o
95600 ifneq ($(CONFIG_BLK_DEV_INITRD),y)
95601 obj-y += noinitramfs.o
95602diff --git a/init/do_mounts.c b/init/do_mounts.c
95603index eb41008..f5dbbf9 100644
95604--- a/init/do_mounts.c
95605+++ b/init/do_mounts.c
95606@@ -360,11 +360,11 @@ static void __init get_fs_names(char *page)
95607 static int __init do_mount_root(char *name, char *fs, int flags, void *data)
95608 {
95609 struct super_block *s;
95610- int err = sys_mount(name, "/root", fs, flags, data);
95611+ int err = sys_mount((char __force_user *)name, (char __force_user *)"/root", (char __force_user *)fs, flags, (void __force_user *)data);
95612 if (err)
95613 return err;
95614
95615- sys_chdir("/root");
95616+ sys_chdir((const char __force_user *)"/root");
95617 s = current->fs->pwd.dentry->d_sb;
95618 ROOT_DEV = s->s_dev;
95619 printk(KERN_INFO
95620@@ -487,18 +487,18 @@ void __init change_floppy(char *fmt, ...)
95621 va_start(args, fmt);
95622 vsprintf(buf, fmt, args);
95623 va_end(args);
95624- fd = sys_open("/dev/root", O_RDWR | O_NDELAY, 0);
95625+ fd = sys_open((char __user *)"/dev/root", O_RDWR | O_NDELAY, 0);
95626 if (fd >= 0) {
95627 sys_ioctl(fd, FDEJECT, 0);
95628 sys_close(fd);
95629 }
95630 printk(KERN_NOTICE "VFS: Insert %s and press ENTER\n", buf);
95631- fd = sys_open("/dev/console", O_RDWR, 0);
95632+ fd = sys_open((__force const char __user *)"/dev/console", O_RDWR, 0);
95633 if (fd >= 0) {
95634 sys_ioctl(fd, TCGETS, (long)&termios);
95635 termios.c_lflag &= ~ICANON;
95636 sys_ioctl(fd, TCSETSF, (long)&termios);
95637- sys_read(fd, &c, 1);
95638+ sys_read(fd, (char __user *)&c, 1);
95639 termios.c_lflag |= ICANON;
95640 sys_ioctl(fd, TCSETSF, (long)&termios);
95641 sys_close(fd);
95642@@ -592,8 +592,8 @@ void __init prepare_namespace(void)
95643 mount_root();
95644 out:
95645 devtmpfs_mount("dev");
95646- sys_mount(".", "/", NULL, MS_MOVE, NULL);
95647- sys_chroot(".");
95648+ sys_mount((char __force_user *)".", (char __force_user *)"/", NULL, MS_MOVE, NULL);
95649+ sys_chroot((const char __force_user *)".");
95650 }
95651
95652 static bool is_tmpfs;
95653diff --git a/init/do_mounts.h b/init/do_mounts.h
95654index f5b978a..69dbfe8 100644
95655--- a/init/do_mounts.h
95656+++ b/init/do_mounts.h
95657@@ -15,15 +15,15 @@ extern int root_mountflags;
95658
95659 static inline int create_dev(char *name, dev_t dev)
95660 {
95661- sys_unlink(name);
95662- return sys_mknod(name, S_IFBLK|0600, new_encode_dev(dev));
95663+ sys_unlink((char __force_user *)name);
95664+ return sys_mknod((char __force_user *)name, S_IFBLK|0600, new_encode_dev(dev));
95665 }
95666
95667 #if BITS_PER_LONG == 32
95668 static inline u32 bstat(char *name)
95669 {
95670 struct stat64 stat;
95671- if (sys_stat64(name, &stat) != 0)
95672+ if (sys_stat64((char __force_user *)name, (struct stat64 __force_user *)&stat) != 0)
95673 return 0;
95674 if (!S_ISBLK(stat.st_mode))
95675 return 0;
95676@@ -35,7 +35,7 @@ static inline u32 bstat(char *name)
95677 static inline u32 bstat(char *name)
95678 {
95679 struct stat stat;
95680- if (sys_newstat(name, &stat) != 0)
95681+ if (sys_newstat((const char __force_user *)name, (struct stat __force_user *)&stat) != 0)
95682 return 0;
95683 if (!S_ISBLK(stat.st_mode))
95684 return 0;
95685diff --git a/init/do_mounts_initrd.c b/init/do_mounts_initrd.c
95686index 3e0878e..8a9d7a0 100644
95687--- a/init/do_mounts_initrd.c
95688+++ b/init/do_mounts_initrd.c
95689@@ -37,13 +37,13 @@ static int init_linuxrc(struct subprocess_info *info, struct cred *new)
95690 {
95691 sys_unshare(CLONE_FS | CLONE_FILES);
95692 /* stdin/stdout/stderr for /linuxrc */
95693- sys_open("/dev/console", O_RDWR, 0);
95694+ sys_open((const char __force_user *)"/dev/console", O_RDWR, 0);
95695 sys_dup(0);
95696 sys_dup(0);
95697 /* move initrd over / and chdir/chroot in initrd root */
95698- sys_chdir("/root");
95699- sys_mount(".", "/", NULL, MS_MOVE, NULL);
95700- sys_chroot(".");
95701+ sys_chdir((const char __force_user *)"/root");
95702+ sys_mount((char __force_user *)".", (char __force_user *)"/", NULL, MS_MOVE, NULL);
95703+ sys_chroot((const char __force_user *)".");
95704 sys_setsid();
95705 return 0;
95706 }
95707@@ -59,8 +59,8 @@ static void __init handle_initrd(void)
95708 create_dev("/dev/root.old", Root_RAM0);
95709 /* mount initrd on rootfs' /root */
95710 mount_block_root("/dev/root.old", root_mountflags & ~MS_RDONLY);
95711- sys_mkdir("/old", 0700);
95712- sys_chdir("/old");
95713+ sys_mkdir((const char __force_user *)"/old", 0700);
95714+ sys_chdir((const char __force_user *)"/old");
95715
95716 /* try loading default modules from initrd */
95717 load_default_modules();
95718@@ -80,31 +80,31 @@ static void __init handle_initrd(void)
95719 current->flags &= ~PF_FREEZER_SKIP;
95720
95721 /* move initrd to rootfs' /old */
95722- sys_mount("..", ".", NULL, MS_MOVE, NULL);
95723+ sys_mount((char __force_user *)"..", (char __force_user *)".", NULL, MS_MOVE, NULL);
95724 /* switch root and cwd back to / of rootfs */
95725- sys_chroot("..");
95726+ sys_chroot((const char __force_user *)"..");
95727
95728 if (new_decode_dev(real_root_dev) == Root_RAM0) {
95729- sys_chdir("/old");
95730+ sys_chdir((const char __force_user *)"/old");
95731 return;
95732 }
95733
95734- sys_chdir("/");
95735+ sys_chdir((const char __force_user *)"/");
95736 ROOT_DEV = new_decode_dev(real_root_dev);
95737 mount_root();
95738
95739 printk(KERN_NOTICE "Trying to move old root to /initrd ... ");
95740- error = sys_mount("/old", "/root/initrd", NULL, MS_MOVE, NULL);
95741+ error = sys_mount((char __force_user *)"/old", (char __force_user *)"/root/initrd", NULL, MS_MOVE, NULL);
95742 if (!error)
95743 printk("okay\n");
95744 else {
95745- int fd = sys_open("/dev/root.old", O_RDWR, 0);
95746+ int fd = sys_open((const char __force_user *)"/dev/root.old", O_RDWR, 0);
95747 if (error == -ENOENT)
95748 printk("/initrd does not exist. Ignored.\n");
95749 else
95750 printk("failed\n");
95751 printk(KERN_NOTICE "Unmounting old root\n");
95752- sys_umount("/old", MNT_DETACH);
95753+ sys_umount((char __force_user *)"/old", MNT_DETACH);
95754 printk(KERN_NOTICE "Trying to free ramdisk memory ... ");
95755 if (fd < 0) {
95756 error = fd;
95757@@ -127,11 +127,11 @@ int __init initrd_load(void)
95758 * mounted in the normal path.
95759 */
95760 if (rd_load_image("/initrd.image") && ROOT_DEV != Root_RAM0) {
95761- sys_unlink("/initrd.image");
95762+ sys_unlink((const char __force_user *)"/initrd.image");
95763 handle_initrd();
95764 return 1;
95765 }
95766 }
95767- sys_unlink("/initrd.image");
95768+ sys_unlink((const char __force_user *)"/initrd.image");
95769 return 0;
95770 }
95771diff --git a/init/do_mounts_md.c b/init/do_mounts_md.c
95772index 8cb6db5..d729f50 100644
95773--- a/init/do_mounts_md.c
95774+++ b/init/do_mounts_md.c
95775@@ -180,7 +180,7 @@ static void __init md_setup_drive(void)
95776 partitioned ? "_d" : "", minor,
95777 md_setup_args[ent].device_names);
95778
95779- fd = sys_open(name, 0, 0);
95780+ fd = sys_open((char __force_user *)name, 0, 0);
95781 if (fd < 0) {
95782 printk(KERN_ERR "md: open failed - cannot start "
95783 "array %s\n", name);
95784@@ -243,7 +243,7 @@ static void __init md_setup_drive(void)
95785 * array without it
95786 */
95787 sys_close(fd);
95788- fd = sys_open(name, 0, 0);
95789+ fd = sys_open((char __force_user *)name, 0, 0);
95790 sys_ioctl(fd, BLKRRPART, 0);
95791 }
95792 sys_close(fd);
95793@@ -293,7 +293,7 @@ static void __init autodetect_raid(void)
95794
95795 wait_for_device_probe();
95796
95797- fd = sys_open("/dev/md0", 0, 0);
95798+ fd = sys_open((const char __force_user *) "/dev/md0", 0, 0);
95799 if (fd >= 0) {
95800 sys_ioctl(fd, RAID_AUTORUN, raid_autopart);
95801 sys_close(fd);
95802diff --git a/init/init_task.c b/init/init_task.c
95803index ba0a7f36..2bcf1d5 100644
95804--- a/init/init_task.c
95805+++ b/init/init_task.c
95806@@ -22,5 +22,9 @@ EXPORT_SYMBOL(init_task);
95807 * Initial thread structure. Alignment of this is handled by a special
95808 * linker map entry.
95809 */
95810+#ifdef CONFIG_X86
95811+union thread_union init_thread_union __init_task_data;
95812+#else
95813 union thread_union init_thread_union __init_task_data =
95814 { INIT_THREAD_INFO(init_task) };
95815+#endif
95816diff --git a/init/initramfs.c b/init/initramfs.c
95817index ad1bd77..dca2c1b 100644
95818--- a/init/initramfs.c
95819+++ b/init/initramfs.c
95820@@ -25,7 +25,7 @@ static ssize_t __init xwrite(int fd, const char *p, size_t count)
95821
95822 /* sys_write only can write MAX_RW_COUNT aka 2G-4K bytes at most */
95823 while (count) {
95824- ssize_t rv = sys_write(fd, p, count);
95825+ ssize_t rv = sys_write(fd, (char __force_user *)p, count);
95826
95827 if (rv < 0) {
95828 if (rv == -EINTR || rv == -EAGAIN)
95829@@ -107,7 +107,7 @@ static void __init free_hash(void)
95830 }
95831 }
95832
95833-static long __init do_utime(char *filename, time_t mtime)
95834+static long __init do_utime(char __force_user *filename, time_t mtime)
95835 {
95836 struct timespec t[2];
95837
95838@@ -142,7 +142,7 @@ static void __init dir_utime(void)
95839 struct dir_entry *de, *tmp;
95840 list_for_each_entry_safe(de, tmp, &dir_list, list) {
95841 list_del(&de->list);
95842- do_utime(de->name, de->mtime);
95843+ do_utime((char __force_user *)de->name, de->mtime);
95844 kfree(de->name);
95845 kfree(de);
95846 }
95847@@ -304,7 +304,7 @@ static int __init maybe_link(void)
95848 if (nlink >= 2) {
95849 char *old = find_link(major, minor, ino, mode, collected);
95850 if (old)
95851- return (sys_link(old, collected) < 0) ? -1 : 1;
95852+ return (sys_link((char __force_user *)old, (char __force_user *)collected) < 0) ? -1 : 1;
95853 }
95854 return 0;
95855 }
95856@@ -313,11 +313,11 @@ static void __init clean_path(char *path, umode_t fmode)
95857 {
95858 struct stat st;
95859
95860- if (!sys_newlstat(path, &st) && (st.st_mode ^ fmode) & S_IFMT) {
95861+ if (!sys_newlstat((char __force_user *)path, (struct stat __force_user *)&st) && (st.st_mode ^ fmode) & S_IFMT) {
95862 if (S_ISDIR(st.st_mode))
95863- sys_rmdir(path);
95864+ sys_rmdir((char __force_user *)path);
95865 else
95866- sys_unlink(path);
95867+ sys_unlink((char __force_user *)path);
95868 }
95869 }
95870
95871@@ -338,7 +338,7 @@ static int __init do_name(void)
95872 int openflags = O_WRONLY|O_CREAT;
95873 if (ml != 1)
95874 openflags |= O_TRUNC;
95875- wfd = sys_open(collected, openflags, mode);
95876+ wfd = sys_open((char __force_user *)collected, openflags, mode);
95877
95878 if (wfd >= 0) {
95879 sys_fchown(wfd, uid, gid);
95880@@ -350,17 +350,17 @@ static int __init do_name(void)
95881 }
95882 }
95883 } else if (S_ISDIR(mode)) {
95884- sys_mkdir(collected, mode);
95885- sys_chown(collected, uid, gid);
95886- sys_chmod(collected, mode);
95887+ sys_mkdir((char __force_user *)collected, mode);
95888+ sys_chown((char __force_user *)collected, uid, gid);
95889+ sys_chmod((char __force_user *)collected, mode);
95890 dir_add(collected, mtime);
95891 } else if (S_ISBLK(mode) || S_ISCHR(mode) ||
95892 S_ISFIFO(mode) || S_ISSOCK(mode)) {
95893 if (maybe_link() == 0) {
95894- sys_mknod(collected, mode, rdev);
95895- sys_chown(collected, uid, gid);
95896- sys_chmod(collected, mode);
95897- do_utime(collected, mtime);
95898+ sys_mknod((char __force_user *)collected, mode, rdev);
95899+ sys_chown((char __force_user *)collected, uid, gid);
95900+ sys_chmod((char __force_user *)collected, mode);
95901+ do_utime((char __force_user *)collected, mtime);
95902 }
95903 }
95904 return 0;
95905@@ -372,7 +372,7 @@ static int __init do_copy(void)
95906 if (xwrite(wfd, victim, body_len) != body_len)
95907 error("write error");
95908 sys_close(wfd);
95909- do_utime(vcollected, mtime);
95910+ do_utime((char __force_user *)vcollected, mtime);
95911 kfree(vcollected);
95912 eat(body_len);
95913 state = SkipIt;
95914@@ -390,9 +390,9 @@ static int __init do_symlink(void)
95915 {
95916 collected[N_ALIGN(name_len) + body_len] = '\0';
95917 clean_path(collected, 0);
95918- sys_symlink(collected + N_ALIGN(name_len), collected);
95919- sys_lchown(collected, uid, gid);
95920- do_utime(collected, mtime);
95921+ sys_symlink((char __force_user *)collected + N_ALIGN(name_len), (char __force_user *)collected);
95922+ sys_lchown((char __force_user *)collected, uid, gid);
95923+ do_utime((char __force_user *)collected, mtime);
95924 state = SkipIt;
95925 next_state = Reset;
95926 return 0;
95927diff --git a/init/main.c b/init/main.c
95928index 6f0f1c5f..a542824 100644
95929--- a/init/main.c
95930+++ b/init/main.c
95931@@ -96,6 +96,8 @@ extern void radix_tree_init(void);
95932 static inline void mark_rodata_ro(void) { }
95933 #endif
95934
95935+extern void grsecurity_init(void);
95936+
95937 /*
95938 * Debug helper: via this flag we know that we are in 'early bootup code'
95939 * where only the boot processor is running with IRQ disabled. This means
95940@@ -157,6 +159,85 @@ static int __init set_reset_devices(char *str)
95941
95942 __setup("reset_devices", set_reset_devices);
95943
95944+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
95945+kgid_t grsec_proc_gid = KGIDT_INIT(CONFIG_GRKERNSEC_PROC_GID);
95946+static int __init setup_grsec_proc_gid(char *str)
95947+{
95948+ grsec_proc_gid = KGIDT_INIT(simple_strtol(str, NULL, 0));
95949+ return 1;
95950+}
95951+__setup("grsec_proc_gid=", setup_grsec_proc_gid);
95952+#endif
95953+#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
95954+int grsec_enable_sysfs_restrict = 1;
95955+static int __init setup_grsec_sysfs_restrict(char *str)
95956+{
95957+ if (!simple_strtol(str, NULL, 0))
95958+ grsec_enable_sysfs_restrict = 0;
95959+ return 1;
95960+}
95961+__setup("grsec_sysfs_restrict", setup_grsec_sysfs_restrict);
95962+#endif
95963+
95964+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
95965+unsigned long pax_user_shadow_base __read_only;
95966+EXPORT_SYMBOL(pax_user_shadow_base);
95967+extern char pax_enter_kernel_user[];
95968+extern char pax_exit_kernel_user[];
95969+#endif
95970+
95971+#if defined(CONFIG_X86) && defined(CONFIG_PAX_MEMORY_UDEREF)
95972+static int __init setup_pax_nouderef(char *str)
95973+{
95974+#ifdef CONFIG_X86_32
95975+ unsigned int cpu;
95976+ struct desc_struct *gdt;
95977+
95978+ for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
95979+ gdt = get_cpu_gdt_table(cpu);
95980+ gdt[GDT_ENTRY_KERNEL_DS].type = 3;
95981+ gdt[GDT_ENTRY_KERNEL_DS].limit = 0xf;
95982+ gdt[GDT_ENTRY_DEFAULT_USER_CS].limit = 0xf;
95983+ gdt[GDT_ENTRY_DEFAULT_USER_DS].limit = 0xf;
95984+ }
95985+ loadsegment(ds, __KERNEL_DS);
95986+ loadsegment(es, __KERNEL_DS);
95987+ loadsegment(ss, __KERNEL_DS);
95988+#else
95989+ memcpy(pax_enter_kernel_user, (unsigned char []){0xc3}, 1);
95990+ memcpy(pax_exit_kernel_user, (unsigned char []){0xc3}, 1);
95991+ clone_pgd_mask = ~(pgdval_t)0UL;
95992+ pax_user_shadow_base = 0UL;
95993+ setup_clear_cpu_cap(X86_FEATURE_PCID);
95994+ setup_clear_cpu_cap(X86_FEATURE_INVPCID);
95995+#endif
95996+
95997+ return 0;
95998+}
95999+early_param("pax_nouderef", setup_pax_nouderef);
96000+
96001+#ifdef CONFIG_X86_64
96002+static int __init setup_pax_weakuderef(char *str)
96003+{
96004+ if (clone_pgd_mask != ~(pgdval_t)0UL)
96005+ pax_user_shadow_base = 1UL << TASK_SIZE_MAX_SHIFT;
96006+ return 1;
96007+}
96008+__setup("pax_weakuderef", setup_pax_weakuderef);
96009+#endif
96010+#endif
96011+
96012+#ifdef CONFIG_PAX_SOFTMODE
96013+int pax_softmode;
96014+
96015+static int __init setup_pax_softmode(char *str)
96016+{
96017+ get_option(&str, &pax_softmode);
96018+ return 1;
96019+}
96020+__setup("pax_softmode=", setup_pax_softmode);
96021+#endif
96022+
96023 static const char *argv_init[MAX_INIT_ARGS+2] = { "init", NULL, };
96024 const char *envp_init[MAX_INIT_ENVS+2] = { "HOME=/", "TERM=linux", NULL, };
96025 static const char *panic_later, *panic_param;
96026@@ -722,7 +803,7 @@ static bool __init_or_module initcall_blacklisted(initcall_t fn)
96027 struct blacklist_entry *entry;
96028 char *fn_name;
96029
96030- fn_name = kasprintf(GFP_KERNEL, "%pf", fn);
96031+ fn_name = kasprintf(GFP_KERNEL, "%pX", fn);
96032 if (!fn_name)
96033 return false;
96034
96035@@ -774,7 +855,7 @@ int __init_or_module do_one_initcall(initcall_t fn)
96036 {
96037 int count = preempt_count();
96038 int ret;
96039- char msgbuf[64];
96040+ const char *msg1 = "", *msg2 = "";
96041
96042 if (initcall_blacklisted(fn))
96043 return -EPERM;
96044@@ -784,18 +865,17 @@ int __init_or_module do_one_initcall(initcall_t fn)
96045 else
96046 ret = fn();
96047
96048- msgbuf[0] = 0;
96049-
96050 if (preempt_count() != count) {
96051- sprintf(msgbuf, "preemption imbalance ");
96052+ msg1 = " preemption imbalance";
96053 preempt_count_set(count);
96054 }
96055 if (irqs_disabled()) {
96056- strlcat(msgbuf, "disabled interrupts ", sizeof(msgbuf));
96057+ msg2 = " disabled interrupts";
96058 local_irq_enable();
96059 }
96060- WARN(msgbuf[0], "initcall %pF returned with %s\n", fn, msgbuf);
96061+ WARN(*msg1 || *msg2, "initcall %pF returned with%s%s\n", fn, msg1, msg2);
96062
96063+ add_latent_entropy();
96064 return ret;
96065 }
96066
96067@@ -901,8 +981,8 @@ static int run_init_process(const char *init_filename)
96068 {
96069 argv_init[0] = init_filename;
96070 return do_execve(getname_kernel(init_filename),
96071- (const char __user *const __user *)argv_init,
96072- (const char __user *const __user *)envp_init);
96073+ (const char __user *const __force_user *)argv_init,
96074+ (const char __user *const __force_user *)envp_init);
96075 }
96076
96077 static int try_to_run_init_process(const char *init_filename)
96078@@ -919,6 +999,10 @@ static int try_to_run_init_process(const char *init_filename)
96079 return ret;
96080 }
96081
96082+#ifdef CONFIG_GRKERNSEC_CHROOT_INITRD
96083+extern int gr_init_ran;
96084+#endif
96085+
96086 static noinline void __init kernel_init_freeable(void);
96087
96088 static int __ref kernel_init(void *unused)
96089@@ -943,6 +1027,11 @@ static int __ref kernel_init(void *unused)
96090 ramdisk_execute_command, ret);
96091 }
96092
96093+#ifdef CONFIG_GRKERNSEC_CHROOT_INITRD
96094+ /* if no initrd was used, be extra sure we enforce chroot restrictions */
96095+ gr_init_ran = 1;
96096+#endif
96097+
96098 /*
96099 * We try each of these until one succeeds.
96100 *
96101@@ -998,7 +1087,7 @@ static noinline void __init kernel_init_freeable(void)
96102 do_basic_setup();
96103
96104 /* Open the /dev/console on the rootfs, this should never fail */
96105- if (sys_open((const char __user *) "/dev/console", O_RDWR, 0) < 0)
96106+ if (sys_open((const char __force_user *) "/dev/console", O_RDWR, 0) < 0)
96107 pr_err("Warning: unable to open an initial console.\n");
96108
96109 (void) sys_dup(0);
96110@@ -1011,11 +1100,13 @@ static noinline void __init kernel_init_freeable(void)
96111 if (!ramdisk_execute_command)
96112 ramdisk_execute_command = "/init";
96113
96114- if (sys_access((const char __user *) ramdisk_execute_command, 0) != 0) {
96115+ if (sys_access((const char __force_user *) ramdisk_execute_command, 0) != 0) {
96116 ramdisk_execute_command = NULL;
96117 prepare_namespace();
96118 }
96119
96120+ grsecurity_init();
96121+
96122 /*
96123 * Ok, we have completed the initial bootup, and
96124 * we're essentially up and running. Get rid of the
96125diff --git a/ipc/compat.c b/ipc/compat.c
96126index 9b3c85f..5266b0f 100644
96127--- a/ipc/compat.c
96128+++ b/ipc/compat.c
96129@@ -396,7 +396,7 @@ COMPAT_SYSCALL_DEFINE6(ipc, u32, call, int, first, int, second,
96130 COMPAT_SHMLBA);
96131 if (err < 0)
96132 return err;
96133- return put_user(raddr, (compat_ulong_t *)compat_ptr(third));
96134+ return put_user(raddr, (compat_ulong_t __user *)compat_ptr(third));
96135 }
96136 case SHMDT:
96137 return sys_shmdt(compat_ptr(ptr));
96138@@ -747,7 +747,7 @@ COMPAT_SYSCALL_DEFINE3(shmctl, int, first, int, second, void __user *, uptr)
96139 }
96140
96141 COMPAT_SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsems,
96142- unsigned, nsops,
96143+ compat_long_t, nsops,
96144 const struct compat_timespec __user *, timeout)
96145 {
96146 struct timespec __user *ts64;
96147diff --git a/ipc/ipc_sysctl.c b/ipc/ipc_sysctl.c
96148index 8ad93c2..efd80f8 100644
96149--- a/ipc/ipc_sysctl.c
96150+++ b/ipc/ipc_sysctl.c
96151@@ -30,7 +30,7 @@ static void *get_ipc(struct ctl_table *table)
96152 static int proc_ipc_dointvec(struct ctl_table *table, int write,
96153 void __user *buffer, size_t *lenp, loff_t *ppos)
96154 {
96155- struct ctl_table ipc_table;
96156+ ctl_table_no_const ipc_table;
96157
96158 memcpy(&ipc_table, table, sizeof(ipc_table));
96159 ipc_table.data = get_ipc(table);
96160@@ -41,7 +41,7 @@ static int proc_ipc_dointvec(struct ctl_table *table, int write,
96161 static int proc_ipc_dointvec_minmax(struct ctl_table *table, int write,
96162 void __user *buffer, size_t *lenp, loff_t *ppos)
96163 {
96164- struct ctl_table ipc_table;
96165+ ctl_table_no_const ipc_table;
96166
96167 memcpy(&ipc_table, table, sizeof(ipc_table));
96168 ipc_table.data = get_ipc(table);
96169@@ -65,7 +65,7 @@ static int proc_ipc_dointvec_minmax_orphans(struct ctl_table *table, int write,
96170 static int proc_ipc_doulongvec_minmax(struct ctl_table *table, int write,
96171 void __user *buffer, size_t *lenp, loff_t *ppos)
96172 {
96173- struct ctl_table ipc_table;
96174+ ctl_table_no_const ipc_table;
96175 memcpy(&ipc_table, table, sizeof(ipc_table));
96176 ipc_table.data = get_ipc(table);
96177
96178@@ -76,7 +76,7 @@ static int proc_ipc_doulongvec_minmax(struct ctl_table *table, int write,
96179 static int proc_ipc_auto_msgmni(struct ctl_table *table, int write,
96180 void __user *buffer, size_t *lenp, loff_t *ppos)
96181 {
96182- struct ctl_table ipc_table;
96183+ ctl_table_no_const ipc_table;
96184 int dummy = 0;
96185
96186 memcpy(&ipc_table, table, sizeof(ipc_table));
96187diff --git a/ipc/mq_sysctl.c b/ipc/mq_sysctl.c
96188index 68d4e95..1477ded 100644
96189--- a/ipc/mq_sysctl.c
96190+++ b/ipc/mq_sysctl.c
96191@@ -25,7 +25,7 @@ static void *get_mq(struct ctl_table *table)
96192 static int proc_mq_dointvec(struct ctl_table *table, int write,
96193 void __user *buffer, size_t *lenp, loff_t *ppos)
96194 {
96195- struct ctl_table mq_table;
96196+ ctl_table_no_const mq_table;
96197 memcpy(&mq_table, table, sizeof(mq_table));
96198 mq_table.data = get_mq(table);
96199
96200@@ -35,7 +35,7 @@ static int proc_mq_dointvec(struct ctl_table *table, int write,
96201 static int proc_mq_dointvec_minmax(struct ctl_table *table, int write,
96202 void __user *buffer, size_t *lenp, loff_t *ppos)
96203 {
96204- struct ctl_table mq_table;
96205+ ctl_table_no_const mq_table;
96206 memcpy(&mq_table, table, sizeof(mq_table));
96207 mq_table.data = get_mq(table);
96208
96209diff --git a/ipc/mqueue.c b/ipc/mqueue.c
96210index 7635a1c..7432cb6 100644
96211--- a/ipc/mqueue.c
96212+++ b/ipc/mqueue.c
96213@@ -278,6 +278,7 @@ static struct inode *mqueue_get_inode(struct super_block *sb,
96214 mq_bytes = mq_treesize + (info->attr.mq_maxmsg *
96215 info->attr.mq_msgsize);
96216
96217+ gr_learn_resource(current, RLIMIT_MSGQUEUE, u->mq_bytes + mq_bytes, 1);
96218 spin_lock(&mq_lock);
96219 if (u->mq_bytes + mq_bytes < u->mq_bytes ||
96220 u->mq_bytes + mq_bytes > rlimit(RLIMIT_MSGQUEUE)) {
96221diff --git a/ipc/sem.c b/ipc/sem.c
96222index 9284211..bca5b1b 100644
96223--- a/ipc/sem.c
96224+++ b/ipc/sem.c
96225@@ -1780,7 +1780,7 @@ static int get_queue_result(struct sem_queue *q)
96226 }
96227
96228 SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops,
96229- unsigned, nsops, const struct timespec __user *, timeout)
96230+ long, nsops, const struct timespec __user *, timeout)
96231 {
96232 int error = -EINVAL;
96233 struct sem_array *sma;
96234@@ -2015,7 +2015,7 @@ out_free:
96235 }
96236
96237 SYSCALL_DEFINE3(semop, int, semid, struct sembuf __user *, tsops,
96238- unsigned, nsops)
96239+ long, nsops)
96240 {
96241 return sys_semtimedop(semid, tsops, nsops, NULL);
96242 }
96243diff --git a/ipc/shm.c b/ipc/shm.c
96244index 19633b4..d454904 100644
96245--- a/ipc/shm.c
96246+++ b/ipc/shm.c
96247@@ -72,6 +72,14 @@ static void shm_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp);
96248 static int sysvipc_shm_proc_show(struct seq_file *s, void *it);
96249 #endif
96250
96251+#ifdef CONFIG_GRKERNSEC
96252+extern int gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
96253+ const u64 shm_createtime, const kuid_t cuid,
96254+ const int shmid);
96255+extern int gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
96256+ const u64 shm_createtime);
96257+#endif
96258+
96259 void shm_init_ns(struct ipc_namespace *ns)
96260 {
96261 ns->shm_ctlmax = SHMMAX;
96262@@ -560,6 +568,9 @@ static int newseg(struct ipc_namespace *ns, struct ipc_params *params)
96263 shp->shm_lprid = 0;
96264 shp->shm_atim = shp->shm_dtim = 0;
96265 shp->shm_ctim = get_seconds();
96266+#ifdef CONFIG_GRKERNSEC
96267+ shp->shm_createtime = ktime_get_ns();
96268+#endif
96269 shp->shm_segsz = size;
96270 shp->shm_nattch = 0;
96271 shp->shm_file = file;
96272@@ -1096,6 +1107,12 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr,
96273 f_mode = FMODE_READ | FMODE_WRITE;
96274 }
96275 if (shmflg & SHM_EXEC) {
96276+
96277+#ifdef CONFIG_PAX_MPROTECT
96278+ if (current->mm->pax_flags & MF_PAX_MPROTECT)
96279+ goto out;
96280+#endif
96281+
96282 prot |= PROT_EXEC;
96283 acc_mode |= S_IXUGO;
96284 }
96285@@ -1120,6 +1137,15 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr,
96286 if (err)
96287 goto out_unlock;
96288
96289+#ifdef CONFIG_GRKERNSEC
96290+ if (!gr_handle_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime,
96291+ shp->shm_perm.cuid, shmid) ||
96292+ !gr_chroot_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime)) {
96293+ err = -EACCES;
96294+ goto out_unlock;
96295+ }
96296+#endif
96297+
96298 ipc_lock_object(&shp->shm_perm);
96299
96300 /* check if shm_destroy() is tearing down shp */
96301@@ -1132,6 +1158,9 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr,
96302 path = shp->shm_file->f_path;
96303 path_get(&path);
96304 shp->shm_nattch++;
96305+#ifdef CONFIG_GRKERNSEC
96306+ shp->shm_lapid = current->pid;
96307+#endif
96308 size = i_size_read(path.dentry->d_inode);
96309 ipc_unlock_object(&shp->shm_perm);
96310 rcu_read_unlock();
96311diff --git a/ipc/util.c b/ipc/util.c
96312index 106bed0..f851429 100644
96313--- a/ipc/util.c
96314+++ b/ipc/util.c
96315@@ -71,6 +71,8 @@ struct ipc_proc_iface {
96316 int (*show)(struct seq_file *, void *);
96317 };
96318
96319+extern int gr_ipc_permitted(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp, int requested_mode, int granted_mode);
96320+
96321 /**
96322 * ipc_init - initialise ipc subsystem
96323 *
96324@@ -497,6 +499,10 @@ int ipcperms(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp, short flag)
96325 granted_mode >>= 6;
96326 else if (in_group_p(ipcp->cgid) || in_group_p(ipcp->gid))
96327 granted_mode >>= 3;
96328+
96329+ if (!gr_ipc_permitted(ns, ipcp, requested_mode, granted_mode))
96330+ return -1;
96331+
96332 /* is there some bit set in requested_mode but not in granted_mode? */
96333 if ((requested_mode & ~granted_mode & 0007) &&
96334 !ns_capable(ns->user_ns, CAP_IPC_OWNER))
96335diff --git a/kernel/audit.c b/kernel/audit.c
96336index 72ab759..757deba 100644
96337--- a/kernel/audit.c
96338+++ b/kernel/audit.c
96339@@ -122,7 +122,7 @@ u32 audit_sig_sid = 0;
96340 3) suppressed due to audit_rate_limit
96341 4) suppressed due to audit_backlog_limit
96342 */
96343-static atomic_t audit_lost = ATOMIC_INIT(0);
96344+static atomic_unchecked_t audit_lost = ATOMIC_INIT(0);
96345
96346 /* The netlink socket. */
96347 static struct sock *audit_sock;
96348@@ -256,7 +256,7 @@ void audit_log_lost(const char *message)
96349 unsigned long now;
96350 int print;
96351
96352- atomic_inc(&audit_lost);
96353+ atomic_inc_unchecked(&audit_lost);
96354
96355 print = (audit_failure == AUDIT_FAIL_PANIC || !audit_rate_limit);
96356
96357@@ -273,7 +273,7 @@ void audit_log_lost(const char *message)
96358 if (print) {
96359 if (printk_ratelimit())
96360 pr_warn("audit_lost=%u audit_rate_limit=%u audit_backlog_limit=%u\n",
96361- atomic_read(&audit_lost),
96362+ atomic_read_unchecked(&audit_lost),
96363 audit_rate_limit,
96364 audit_backlog_limit);
96365 audit_panic(message);
96366@@ -831,7 +831,7 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
96367 s.pid = audit_pid;
96368 s.rate_limit = audit_rate_limit;
96369 s.backlog_limit = audit_backlog_limit;
96370- s.lost = atomic_read(&audit_lost);
96371+ s.lost = atomic_read_unchecked(&audit_lost);
96372 s.backlog = skb_queue_len(&audit_skb_queue);
96373 s.feature_bitmap = AUDIT_FEATURE_BITMAP_ALL;
96374 s.backlog_wait_time = audit_backlog_wait_time;
96375diff --git a/kernel/auditsc.c b/kernel/auditsc.c
96376index dc4ae70..2a2bddc 100644
96377--- a/kernel/auditsc.c
96378+++ b/kernel/auditsc.c
96379@@ -1955,7 +1955,7 @@ int auditsc_get_stamp(struct audit_context *ctx,
96380 }
96381
96382 /* global counter which is incremented every time something logs in */
96383-static atomic_t session_id = ATOMIC_INIT(0);
96384+static atomic_unchecked_t session_id = ATOMIC_INIT(0);
96385
96386 static int audit_set_loginuid_perm(kuid_t loginuid)
96387 {
96388@@ -2022,7 +2022,7 @@ int audit_set_loginuid(kuid_t loginuid)
96389
96390 /* are we setting or clearing? */
96391 if (uid_valid(loginuid))
96392- sessionid = (unsigned int)atomic_inc_return(&session_id);
96393+ sessionid = (unsigned int)atomic_inc_return_unchecked(&session_id);
96394
96395 task->sessionid = sessionid;
96396 task->loginuid = loginuid;
96397diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
96398index 0c5796e..a9414e2 100644
96399--- a/kernel/bpf/core.c
96400+++ b/kernel/bpf/core.c
96401@@ -143,14 +143,17 @@ bpf_jit_binary_alloc(unsigned int proglen, u8 **image_ptr,
96402 * random section of illegal instructions.
96403 */
96404 size = round_up(proglen + sizeof(*hdr) + 128, PAGE_SIZE);
96405- hdr = module_alloc(size);
96406+ hdr = module_alloc_exec(size);
96407 if (hdr == NULL)
96408 return NULL;
96409
96410 /* Fill space with illegal/arch-dep instructions. */
96411 bpf_fill_ill_insns(hdr, size);
96412
96413+ pax_open_kernel();
96414 hdr->pages = size / PAGE_SIZE;
96415+ pax_close_kernel();
96416+
96417 hole = min_t(unsigned int, size - (proglen + sizeof(*hdr)),
96418 PAGE_SIZE - sizeof(*hdr));
96419 start = (prandom_u32() % hole) & ~(alignment - 1);
96420@@ -163,7 +166,7 @@ bpf_jit_binary_alloc(unsigned int proglen, u8 **image_ptr,
96421
96422 void bpf_jit_binary_free(struct bpf_binary_header *hdr)
96423 {
96424- module_memfree(hdr);
96425+ module_memfree_exec(hdr);
96426 }
96427 #endif /* CONFIG_BPF_JIT */
96428
96429diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
96430index 536edc2..d28c85d 100644
96431--- a/kernel/bpf/syscall.c
96432+++ b/kernel/bpf/syscall.c
96433@@ -548,11 +548,15 @@ SYSCALL_DEFINE3(bpf, int, cmd, union bpf_attr __user *, uattr, unsigned int, siz
96434 int err;
96435
96436 /* the syscall is limited to root temporarily. This restriction will be
96437- * lifted when security audit is clean. Note that eBPF+tracing must have
96438- * this restriction, since it may pass kernel data to user space
96439+ * lifted by upstream when a half-assed security audit is clean. Note
96440+ * that eBPF+tracing must have this restriction, since it may pass
96441+ * kernel data to user space
96442 */
96443 if (!capable(CAP_SYS_ADMIN))
96444 return -EPERM;
96445+#ifdef CONFIG_GRKERNSEC
96446+ return -EPERM;
96447+#endif
96448
96449 if (!access_ok(VERIFY_READ, uattr, 1))
96450 return -EFAULT;
96451diff --git a/kernel/capability.c b/kernel/capability.c
96452index 989f5bf..d317ca0 100644
96453--- a/kernel/capability.c
96454+++ b/kernel/capability.c
96455@@ -192,6 +192,9 @@ SYSCALL_DEFINE2(capget, cap_user_header_t, header, cap_user_data_t, dataptr)
96456 * before modification is attempted and the application
96457 * fails.
96458 */
96459+ if (tocopy > ARRAY_SIZE(kdata))
96460+ return -EFAULT;
96461+
96462 if (copy_to_user(dataptr, kdata, tocopy
96463 * sizeof(struct __user_cap_data_struct))) {
96464 return -EFAULT;
96465@@ -297,10 +300,11 @@ bool has_ns_capability(struct task_struct *t,
96466 int ret;
96467
96468 rcu_read_lock();
96469- ret = security_capable(__task_cred(t), ns, cap);
96470+ ret = security_capable(__task_cred(t), ns, cap) == 0 &&
96471+ gr_task_is_capable(t, __task_cred(t), cap);
96472 rcu_read_unlock();
96473
96474- return (ret == 0);
96475+ return ret;
96476 }
96477
96478 /**
96479@@ -337,10 +341,10 @@ bool has_ns_capability_noaudit(struct task_struct *t,
96480 int ret;
96481
96482 rcu_read_lock();
96483- ret = security_capable_noaudit(__task_cred(t), ns, cap);
96484+ ret = security_capable_noaudit(__task_cred(t), ns, cap) == 0 && gr_task_is_capable_nolog(t, cap);
96485 rcu_read_unlock();
96486
96487- return (ret == 0);
96488+ return ret;
96489 }
96490
96491 /**
96492@@ -378,7 +382,7 @@ bool ns_capable(struct user_namespace *ns, int cap)
96493 BUG();
96494 }
96495
96496- if (security_capable(current_cred(), ns, cap) == 0) {
96497+ if (security_capable(current_cred(), ns, cap) == 0 && gr_is_capable(cap)) {
96498 current->flags |= PF_SUPERPRIV;
96499 return true;
96500 }
96501@@ -386,6 +390,21 @@ bool ns_capable(struct user_namespace *ns, int cap)
96502 }
96503 EXPORT_SYMBOL(ns_capable);
96504
96505+bool ns_capable_nolog(struct user_namespace *ns, int cap)
96506+{
96507+ if (unlikely(!cap_valid(cap))) {
96508+ printk(KERN_CRIT "capable_nolog() called with invalid cap=%u\n", cap);
96509+ BUG();
96510+ }
96511+
96512+ if (security_capable_noaudit(current_cred(), ns, cap) == 0 && gr_is_capable_nolog(cap)) {
96513+ current->flags |= PF_SUPERPRIV;
96514+ return true;
96515+ }
96516+ return false;
96517+}
96518+EXPORT_SYMBOL(ns_capable_nolog);
96519+
96520 /**
96521 * file_ns_capable - Determine if the file's opener had a capability in effect
96522 * @file: The file we want to check
96523@@ -427,6 +446,12 @@ bool capable(int cap)
96524 }
96525 EXPORT_SYMBOL(capable);
96526
96527+bool capable_nolog(int cap)
96528+{
96529+ return ns_capable_nolog(&init_user_ns, cap);
96530+}
96531+EXPORT_SYMBOL(capable_nolog);
96532+
96533 /**
96534 * capable_wrt_inode_uidgid - Check nsown_capable and uid and gid mapped
96535 * @inode: The inode in question
96536@@ -444,3 +469,12 @@ bool capable_wrt_inode_uidgid(const struct inode *inode, int cap)
96537 kgid_has_mapping(ns, inode->i_gid);
96538 }
96539 EXPORT_SYMBOL(capable_wrt_inode_uidgid);
96540+
96541+bool capable_wrt_inode_uidgid_nolog(const struct inode *inode, int cap)
96542+{
96543+ struct user_namespace *ns = current_user_ns();
96544+
96545+ return ns_capable_nolog(ns, cap) && kuid_has_mapping(ns, inode->i_uid) &&
96546+ kgid_has_mapping(ns, inode->i_gid);
96547+}
96548+EXPORT_SYMBOL(capable_wrt_inode_uidgid_nolog);
96549diff --git a/kernel/cgroup.c b/kernel/cgroup.c
96550index 29a7b2c..a64e30a 100644
96551--- a/kernel/cgroup.c
96552+++ b/kernel/cgroup.c
96553@@ -5347,6 +5347,9 @@ static void cgroup_release_agent(struct work_struct *work)
96554 if (!pathbuf || !agentbuf)
96555 goto out;
96556
96557+ if (agentbuf[0] == '\0')
96558+ goto out;
96559+
96560 path = cgroup_path(cgrp, pathbuf, PATH_MAX);
96561 if (!path)
96562 goto out;
96563@@ -5532,7 +5535,7 @@ static int cgroup_css_links_read(struct seq_file *seq, void *v)
96564 struct task_struct *task;
96565 int count = 0;
96566
96567- seq_printf(seq, "css_set %p\n", cset);
96568+ seq_printf(seq, "css_set %pK\n", cset);
96569
96570 list_for_each_entry(task, &cset->tasks, cg_list) {
96571 if (count++ > MAX_TASKS_SHOWN_PER_CSS)
96572diff --git a/kernel/compat.c b/kernel/compat.c
96573index 24f0061..ea80802 100644
96574--- a/kernel/compat.c
96575+++ b/kernel/compat.c
96576@@ -13,6 +13,7 @@
96577
96578 #include <linux/linkage.h>
96579 #include <linux/compat.h>
96580+#include <linux/module.h>
96581 #include <linux/errno.h>
96582 #include <linux/time.h>
96583 #include <linux/signal.h>
96584@@ -220,7 +221,7 @@ static long compat_nanosleep_restart(struct restart_block *restart)
96585 mm_segment_t oldfs;
96586 long ret;
96587
96588- restart->nanosleep.rmtp = (struct timespec __user *) &rmt;
96589+ restart->nanosleep.rmtp = (struct timespec __force_user *) &rmt;
96590 oldfs = get_fs();
96591 set_fs(KERNEL_DS);
96592 ret = hrtimer_nanosleep_restart(restart);
96593@@ -252,7 +253,7 @@ COMPAT_SYSCALL_DEFINE2(nanosleep, struct compat_timespec __user *, rqtp,
96594 oldfs = get_fs();
96595 set_fs(KERNEL_DS);
96596 ret = hrtimer_nanosleep(&tu,
96597- rmtp ? (struct timespec __user *)&rmt : NULL,
96598+ rmtp ? (struct timespec __force_user *)&rmt : NULL,
96599 HRTIMER_MODE_REL, CLOCK_MONOTONIC);
96600 set_fs(oldfs);
96601
96602@@ -378,7 +379,7 @@ COMPAT_SYSCALL_DEFINE1(sigpending, compat_old_sigset_t __user *, set)
96603 mm_segment_t old_fs = get_fs();
96604
96605 set_fs(KERNEL_DS);
96606- ret = sys_sigpending((old_sigset_t __user *) &s);
96607+ ret = sys_sigpending((old_sigset_t __force_user *) &s);
96608 set_fs(old_fs);
96609 if (ret == 0)
96610 ret = put_user(s, set);
96611@@ -468,7 +469,7 @@ COMPAT_SYSCALL_DEFINE2(old_getrlimit, unsigned int, resource,
96612 mm_segment_t old_fs = get_fs();
96613
96614 set_fs(KERNEL_DS);
96615- ret = sys_old_getrlimit(resource, (struct rlimit __user *)&r);
96616+ ret = sys_old_getrlimit(resource, (struct rlimit __force_user *)&r);
96617 set_fs(old_fs);
96618
96619 if (!ret) {
96620@@ -550,8 +551,8 @@ COMPAT_SYSCALL_DEFINE4(wait4,
96621 set_fs (KERNEL_DS);
96622 ret = sys_wait4(pid,
96623 (stat_addr ?
96624- (unsigned int __user *) &status : NULL),
96625- options, (struct rusage __user *) &r);
96626+ (unsigned int __force_user *) &status : NULL),
96627+ options, (struct rusage __force_user *) &r);
96628 set_fs (old_fs);
96629
96630 if (ret > 0) {
96631@@ -577,8 +578,8 @@ COMPAT_SYSCALL_DEFINE5(waitid,
96632 memset(&info, 0, sizeof(info));
96633
96634 set_fs(KERNEL_DS);
96635- ret = sys_waitid(which, pid, (siginfo_t __user *)&info, options,
96636- uru ? (struct rusage __user *)&ru : NULL);
96637+ ret = sys_waitid(which, pid, (siginfo_t __force_user *)&info, options,
96638+ uru ? (struct rusage __force_user *)&ru : NULL);
96639 set_fs(old_fs);
96640
96641 if ((ret < 0) || (info.si_signo == 0))
96642@@ -712,8 +713,8 @@ COMPAT_SYSCALL_DEFINE4(timer_settime, timer_t, timer_id, int, flags,
96643 oldfs = get_fs();
96644 set_fs(KERNEL_DS);
96645 err = sys_timer_settime(timer_id, flags,
96646- (struct itimerspec __user *) &newts,
96647- (struct itimerspec __user *) &oldts);
96648+ (struct itimerspec __force_user *) &newts,
96649+ (struct itimerspec __force_user *) &oldts);
96650 set_fs(oldfs);
96651 if (!err && old && put_compat_itimerspec(old, &oldts))
96652 return -EFAULT;
96653@@ -730,7 +731,7 @@ COMPAT_SYSCALL_DEFINE2(timer_gettime, timer_t, timer_id,
96654 oldfs = get_fs();
96655 set_fs(KERNEL_DS);
96656 err = sys_timer_gettime(timer_id,
96657- (struct itimerspec __user *) &ts);
96658+ (struct itimerspec __force_user *) &ts);
96659 set_fs(oldfs);
96660 if (!err && put_compat_itimerspec(setting, &ts))
96661 return -EFAULT;
96662@@ -749,7 +750,7 @@ COMPAT_SYSCALL_DEFINE2(clock_settime, clockid_t, which_clock,
96663 oldfs = get_fs();
96664 set_fs(KERNEL_DS);
96665 err = sys_clock_settime(which_clock,
96666- (struct timespec __user *) &ts);
96667+ (struct timespec __force_user *) &ts);
96668 set_fs(oldfs);
96669 return err;
96670 }
96671@@ -764,7 +765,7 @@ COMPAT_SYSCALL_DEFINE2(clock_gettime, clockid_t, which_clock,
96672 oldfs = get_fs();
96673 set_fs(KERNEL_DS);
96674 err = sys_clock_gettime(which_clock,
96675- (struct timespec __user *) &ts);
96676+ (struct timespec __force_user *) &ts);
96677 set_fs(oldfs);
96678 if (!err && compat_put_timespec(&ts, tp))
96679 return -EFAULT;
96680@@ -784,7 +785,7 @@ COMPAT_SYSCALL_DEFINE2(clock_adjtime, clockid_t, which_clock,
96681
96682 oldfs = get_fs();
96683 set_fs(KERNEL_DS);
96684- ret = sys_clock_adjtime(which_clock, (struct timex __user *) &txc);
96685+ ret = sys_clock_adjtime(which_clock, (struct timex __force_user *) &txc);
96686 set_fs(oldfs);
96687
96688 err = compat_put_timex(utp, &txc);
96689@@ -804,7 +805,7 @@ COMPAT_SYSCALL_DEFINE2(clock_getres, clockid_t, which_clock,
96690 oldfs = get_fs();
96691 set_fs(KERNEL_DS);
96692 err = sys_clock_getres(which_clock,
96693- (struct timespec __user *) &ts);
96694+ (struct timespec __force_user *) &ts);
96695 set_fs(oldfs);
96696 if (!err && tp && compat_put_timespec(&ts, tp))
96697 return -EFAULT;
96698@@ -818,7 +819,7 @@ static long compat_clock_nanosleep_restart(struct restart_block *restart)
96699 struct timespec tu;
96700 struct compat_timespec __user *rmtp = restart->nanosleep.compat_rmtp;
96701
96702- restart->nanosleep.rmtp = (struct timespec __user *) &tu;
96703+ restart->nanosleep.rmtp = (struct timespec __force_user *) &tu;
96704 oldfs = get_fs();
96705 set_fs(KERNEL_DS);
96706 err = clock_nanosleep_restart(restart);
96707@@ -850,8 +851,8 @@ COMPAT_SYSCALL_DEFINE4(clock_nanosleep, clockid_t, which_clock, int, flags,
96708 oldfs = get_fs();
96709 set_fs(KERNEL_DS);
96710 err = sys_clock_nanosleep(which_clock, flags,
96711- (struct timespec __user *) &in,
96712- (struct timespec __user *) &out);
96713+ (struct timespec __force_user *) &in,
96714+ (struct timespec __force_user *) &out);
96715 set_fs(oldfs);
96716
96717 if ((err == -ERESTART_RESTARTBLOCK) && rmtp &&
96718@@ -1145,7 +1146,7 @@ COMPAT_SYSCALL_DEFINE2(sched_rr_get_interval,
96719 mm_segment_t old_fs = get_fs();
96720
96721 set_fs(KERNEL_DS);
96722- ret = sys_sched_rr_get_interval(pid, (struct timespec __user *)&t);
96723+ ret = sys_sched_rr_get_interval(pid, (struct timespec __force_user *)&t);
96724 set_fs(old_fs);
96725 if (compat_put_timespec(&t, interval))
96726 return -EFAULT;
96727diff --git a/kernel/configs.c b/kernel/configs.c
96728index c18b1f1..b9a0132 100644
96729--- a/kernel/configs.c
96730+++ b/kernel/configs.c
96731@@ -74,8 +74,19 @@ static int __init ikconfig_init(void)
96732 struct proc_dir_entry *entry;
96733
96734 /* create the current config file */
96735+#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
96736+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_HIDESYM)
96737+ entry = proc_create("config.gz", S_IFREG | S_IRUSR, NULL,
96738+ &ikconfig_file_ops);
96739+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
96740+ entry = proc_create("config.gz", S_IFREG | S_IRUSR | S_IRGRP, NULL,
96741+ &ikconfig_file_ops);
96742+#endif
96743+#else
96744 entry = proc_create("config.gz", S_IFREG | S_IRUGO, NULL,
96745 &ikconfig_file_ops);
96746+#endif
96747+
96748 if (!entry)
96749 return -ENOMEM;
96750
96751diff --git a/kernel/cred.c b/kernel/cred.c
96752index e0573a4..26c0fd3 100644
96753--- a/kernel/cred.c
96754+++ b/kernel/cred.c
96755@@ -164,6 +164,16 @@ void exit_creds(struct task_struct *tsk)
96756 validate_creds(cred);
96757 alter_cred_subscribers(cred, -1);
96758 put_cred(cred);
96759+
96760+#ifdef CONFIG_GRKERNSEC_SETXID
96761+ cred = (struct cred *) tsk->delayed_cred;
96762+ if (cred != NULL) {
96763+ tsk->delayed_cred = NULL;
96764+ validate_creds(cred);
96765+ alter_cred_subscribers(cred, -1);
96766+ put_cred(cred);
96767+ }
96768+#endif
96769 }
96770
96771 /**
96772@@ -411,7 +421,7 @@ static bool cred_cap_issubset(const struct cred *set, const struct cred *subset)
96773 * Always returns 0 thus allowing this function to be tail-called at the end
96774 * of, say, sys_setgid().
96775 */
96776-int commit_creds(struct cred *new)
96777+static int __commit_creds(struct cred *new)
96778 {
96779 struct task_struct *task = current;
96780 const struct cred *old = task->real_cred;
96781@@ -430,6 +440,8 @@ int commit_creds(struct cred *new)
96782
96783 get_cred(new); /* we will require a ref for the subj creds too */
96784
96785+ gr_set_role_label(task, new->uid, new->gid);
96786+
96787 /* dumpability changes */
96788 if (!uid_eq(old->euid, new->euid) ||
96789 !gid_eq(old->egid, new->egid) ||
96790@@ -479,6 +491,105 @@ int commit_creds(struct cred *new)
96791 put_cred(old);
96792 return 0;
96793 }
96794+#ifdef CONFIG_GRKERNSEC_SETXID
96795+extern int set_user(struct cred *new);
96796+
96797+void gr_delayed_cred_worker(void)
96798+{
96799+ const struct cred *new = current->delayed_cred;
96800+ struct cred *ncred;
96801+
96802+ current->delayed_cred = NULL;
96803+
96804+ if (!uid_eq(current_uid(), GLOBAL_ROOT_UID) && new != NULL) {
96805+ // from doing get_cred on it when queueing this
96806+ put_cred(new);
96807+ return;
96808+ } else if (new == NULL)
96809+ return;
96810+
96811+ ncred = prepare_creds();
96812+ if (!ncred)
96813+ goto die;
96814+ // uids
96815+ ncred->uid = new->uid;
96816+ ncred->euid = new->euid;
96817+ ncred->suid = new->suid;
96818+ ncred->fsuid = new->fsuid;
96819+ // gids
96820+ ncred->gid = new->gid;
96821+ ncred->egid = new->egid;
96822+ ncred->sgid = new->sgid;
96823+ ncred->fsgid = new->fsgid;
96824+ // groups
96825+ set_groups(ncred, new->group_info);
96826+ // caps
96827+ ncred->securebits = new->securebits;
96828+ ncred->cap_inheritable = new->cap_inheritable;
96829+ ncred->cap_permitted = new->cap_permitted;
96830+ ncred->cap_effective = new->cap_effective;
96831+ ncred->cap_bset = new->cap_bset;
96832+
96833+ if (set_user(ncred)) {
96834+ abort_creds(ncred);
96835+ goto die;
96836+ }
96837+
96838+ // from doing get_cred on it when queueing this
96839+ put_cred(new);
96840+
96841+ __commit_creds(ncred);
96842+ return;
96843+die:
96844+ // from doing get_cred on it when queueing this
96845+ put_cred(new);
96846+ do_group_exit(SIGKILL);
96847+}
96848+#endif
96849+
96850+int commit_creds(struct cred *new)
96851+{
96852+#ifdef CONFIG_GRKERNSEC_SETXID
96853+ int ret;
96854+ int schedule_it = 0;
96855+ struct task_struct *t;
96856+ unsigned oldsecurebits = current_cred()->securebits;
96857+
96858+ /* we won't get called with tasklist_lock held for writing
96859+ and interrupts disabled as the cred struct in that case is
96860+ init_cred
96861+ */
96862+ if (grsec_enable_setxid && !current_is_single_threaded() &&
96863+ uid_eq(current_uid(), GLOBAL_ROOT_UID) &&
96864+ !uid_eq(new->uid, GLOBAL_ROOT_UID)) {
96865+ schedule_it = 1;
96866+ }
96867+ ret = __commit_creds(new);
96868+ if (schedule_it) {
96869+ rcu_read_lock();
96870+ read_lock(&tasklist_lock);
96871+ for (t = next_thread(current); t != current;
96872+ t = next_thread(t)) {
96873+ /* we'll check if the thread has uid 0 in
96874+ * the delayed worker routine
96875+ */
96876+ if (task_securebits(t) == oldsecurebits &&
96877+ t->delayed_cred == NULL) {
96878+ t->delayed_cred = get_cred(new);
96879+ set_tsk_thread_flag(t, TIF_GRSEC_SETXID);
96880+ set_tsk_need_resched(t);
96881+ }
96882+ }
96883+ read_unlock(&tasklist_lock);
96884+ rcu_read_unlock();
96885+ }
96886+
96887+ return ret;
96888+#else
96889+ return __commit_creds(new);
96890+#endif
96891+}
96892+
96893 EXPORT_SYMBOL(commit_creds);
96894
96895 /**
96896diff --git a/kernel/debug/debug_core.c b/kernel/debug/debug_core.c
96897index 0874e2e..5b32cc9 100644
96898--- a/kernel/debug/debug_core.c
96899+++ b/kernel/debug/debug_core.c
96900@@ -127,7 +127,7 @@ static DEFINE_RAW_SPINLOCK(dbg_slave_lock);
96901 */
96902 static atomic_t masters_in_kgdb;
96903 static atomic_t slaves_in_kgdb;
96904-static atomic_t kgdb_break_tasklet_var;
96905+static atomic_unchecked_t kgdb_break_tasklet_var;
96906 atomic_t kgdb_setting_breakpoint;
96907
96908 struct task_struct *kgdb_usethread;
96909@@ -137,7 +137,7 @@ int kgdb_single_step;
96910 static pid_t kgdb_sstep_pid;
96911
96912 /* to keep track of the CPU which is doing the single stepping*/
96913-atomic_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
96914+atomic_unchecked_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
96915
96916 /*
96917 * If you are debugging a problem where roundup (the collection of
96918@@ -552,7 +552,7 @@ return_normal:
96919 * kernel will only try for the value of sstep_tries before
96920 * giving up and continuing on.
96921 */
96922- if (atomic_read(&kgdb_cpu_doing_single_step) != -1 &&
96923+ if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1 &&
96924 (kgdb_info[cpu].task &&
96925 kgdb_info[cpu].task->pid != kgdb_sstep_pid) && --sstep_tries) {
96926 atomic_set(&kgdb_active, -1);
96927@@ -654,8 +654,8 @@ cpu_master_loop:
96928 }
96929
96930 kgdb_restore:
96931- if (atomic_read(&kgdb_cpu_doing_single_step) != -1) {
96932- int sstep_cpu = atomic_read(&kgdb_cpu_doing_single_step);
96933+ if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1) {
96934+ int sstep_cpu = atomic_read_unchecked(&kgdb_cpu_doing_single_step);
96935 if (kgdb_info[sstep_cpu].task)
96936 kgdb_sstep_pid = kgdb_info[sstep_cpu].task->pid;
96937 else
96938@@ -949,18 +949,18 @@ static void kgdb_unregister_callbacks(void)
96939 static void kgdb_tasklet_bpt(unsigned long ing)
96940 {
96941 kgdb_breakpoint();
96942- atomic_set(&kgdb_break_tasklet_var, 0);
96943+ atomic_set_unchecked(&kgdb_break_tasklet_var, 0);
96944 }
96945
96946 static DECLARE_TASKLET(kgdb_tasklet_breakpoint, kgdb_tasklet_bpt, 0);
96947
96948 void kgdb_schedule_breakpoint(void)
96949 {
96950- if (atomic_read(&kgdb_break_tasklet_var) ||
96951+ if (atomic_read_unchecked(&kgdb_break_tasklet_var) ||
96952 atomic_read(&kgdb_active) != -1 ||
96953 atomic_read(&kgdb_setting_breakpoint))
96954 return;
96955- atomic_inc(&kgdb_break_tasklet_var);
96956+ atomic_inc_unchecked(&kgdb_break_tasklet_var);
96957 tasklet_schedule(&kgdb_tasklet_breakpoint);
96958 }
96959 EXPORT_SYMBOL_GPL(kgdb_schedule_breakpoint);
96960diff --git a/kernel/debug/kdb/kdb_main.c b/kernel/debug/kdb/kdb_main.c
96961index 41213454..861e178 100644
96962--- a/kernel/debug/kdb/kdb_main.c
96963+++ b/kernel/debug/kdb/kdb_main.c
96964@@ -2021,7 +2021,7 @@ static int kdb_lsmod(int argc, const char **argv)
96965 continue;
96966
96967 kdb_printf("%-20s%8u 0x%p ", mod->name,
96968- mod->core_size, (void *)mod);
96969+ mod->core_size_rx + mod->core_size_rw, (void *)mod);
96970 #ifdef CONFIG_MODULE_UNLOAD
96971 kdb_printf("%4d ", module_refcount(mod));
96972 #endif
96973@@ -2031,7 +2031,7 @@ static int kdb_lsmod(int argc, const char **argv)
96974 kdb_printf(" (Loading)");
96975 else
96976 kdb_printf(" (Live)");
96977- kdb_printf(" 0x%p", mod->module_core);
96978+ kdb_printf(" 0x%p 0x%p", mod->module_core_rx, mod->module_core_rw);
96979
96980 #ifdef CONFIG_MODULE_UNLOAD
96981 {
96982diff --git a/kernel/events/core.c b/kernel/events/core.c
96983index 2fabc06..79cceec 100644
96984--- a/kernel/events/core.c
96985+++ b/kernel/events/core.c
96986@@ -170,8 +170,15 @@ static struct srcu_struct pmus_srcu;
96987 * 0 - disallow raw tracepoint access for unpriv
96988 * 1 - disallow cpu events for unpriv
96989 * 2 - disallow kernel profiling for unpriv
96990+ * 3 - disallow all unpriv perf event use
96991 */
96992-int sysctl_perf_event_paranoid __read_mostly = 1;
96993+#ifdef CONFIG_GRKERNSEC_PERF_HARDEN
96994+int sysctl_perf_event_legitimately_concerned __read_mostly = 3;
96995+#elif defined(CONFIG_GRKERNSEC_HIDESYM)
96996+int sysctl_perf_event_legitimately_concerned __read_mostly = 2;
96997+#else
96998+int sysctl_perf_event_legitimately_concerned __read_mostly = 1;
96999+#endif
97000
97001 /* Minimum for 512 kiB + 1 user control page */
97002 int sysctl_perf_event_mlock __read_mostly = 512 + (PAGE_SIZE / 1024); /* 'free' kiB per user */
97003@@ -197,7 +204,7 @@ void update_perf_cpu_limits(void)
97004
97005 tmp *= sysctl_perf_cpu_time_max_percent;
97006 do_div(tmp, 100);
97007- ACCESS_ONCE(perf_sample_allowed_ns) = tmp;
97008+ ACCESS_ONCE_RW(perf_sample_allowed_ns) = tmp;
97009 }
97010
97011 static int perf_rotate_context(struct perf_cpu_context *cpuctx);
97012@@ -303,7 +310,7 @@ void perf_sample_event_took(u64 sample_len_ns)
97013 }
97014 }
97015
97016-static atomic64_t perf_event_id;
97017+static atomic64_unchecked_t perf_event_id;
97018
97019 static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx,
97020 enum event_type_t event_type);
97021@@ -3220,7 +3227,7 @@ static void __perf_event_read(void *info)
97022
97023 static inline u64 perf_event_count(struct perf_event *event)
97024 {
97025- return local64_read(&event->count) + atomic64_read(&event->child_count);
97026+ return local64_read(&event->count) + atomic64_read_unchecked(&event->child_count);
97027 }
97028
97029 static u64 perf_event_read(struct perf_event *event)
97030@@ -3656,9 +3663,9 @@ u64 perf_event_read_value(struct perf_event *event, u64 *enabled, u64 *running)
97031 mutex_lock(&event->child_mutex);
97032 total += perf_event_read(event);
97033 *enabled += event->total_time_enabled +
97034- atomic64_read(&event->child_total_time_enabled);
97035+ atomic64_read_unchecked(&event->child_total_time_enabled);
97036 *running += event->total_time_running +
97037- atomic64_read(&event->child_total_time_running);
97038+ atomic64_read_unchecked(&event->child_total_time_running);
97039
97040 list_for_each_entry(child, &event->child_list, child_list) {
97041 total += perf_event_read(child);
97042@@ -4147,10 +4154,10 @@ void perf_event_update_userpage(struct perf_event *event)
97043 userpg->offset -= local64_read(&event->hw.prev_count);
97044
97045 userpg->time_enabled = enabled +
97046- atomic64_read(&event->child_total_time_enabled);
97047+ atomic64_read_unchecked(&event->child_total_time_enabled);
97048
97049 userpg->time_running = running +
97050- atomic64_read(&event->child_total_time_running);
97051+ atomic64_read_unchecked(&event->child_total_time_running);
97052
97053 arch_perf_update_userpage(event, userpg, now);
97054
97055@@ -4740,7 +4747,7 @@ perf_output_sample_ustack(struct perf_output_handle *handle, u64 dump_size,
97056
97057 /* Data. */
97058 sp = perf_user_stack_pointer(regs);
97059- rem = __output_copy_user(handle, (void *) sp, dump_size);
97060+ rem = __output_copy_user(handle, (void __user *) sp, dump_size);
97061 dyn_size = dump_size - rem;
97062
97063 perf_output_skip(handle, rem);
97064@@ -4831,11 +4838,11 @@ static void perf_output_read_one(struct perf_output_handle *handle,
97065 values[n++] = perf_event_count(event);
97066 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
97067 values[n++] = enabled +
97068- atomic64_read(&event->child_total_time_enabled);
97069+ atomic64_read_unchecked(&event->child_total_time_enabled);
97070 }
97071 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
97072 values[n++] = running +
97073- atomic64_read(&event->child_total_time_running);
97074+ atomic64_read_unchecked(&event->child_total_time_running);
97075 }
97076 if (read_format & PERF_FORMAT_ID)
97077 values[n++] = primary_event_id(event);
97078@@ -7180,7 +7187,7 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
97079 event->parent = parent_event;
97080
97081 event->ns = get_pid_ns(task_active_pid_ns(current));
97082- event->id = atomic64_inc_return(&perf_event_id);
97083+ event->id = atomic64_inc_return_unchecked(&perf_event_id);
97084
97085 event->state = PERF_EVENT_STATE_INACTIVE;
97086
97087@@ -7470,6 +7477,11 @@ SYSCALL_DEFINE5(perf_event_open,
97088 if (flags & ~PERF_FLAG_ALL)
97089 return -EINVAL;
97090
97091+#ifdef CONFIG_GRKERNSEC_PERF_HARDEN
97092+ if (perf_paranoid_any() && !capable(CAP_SYS_ADMIN))
97093+ return -EACCES;
97094+#endif
97095+
97096 err = perf_copy_attr(attr_uptr, &attr);
97097 if (err)
97098 return err;
97099@@ -7892,10 +7904,10 @@ static void sync_child_event(struct perf_event *child_event,
97100 /*
97101 * Add back the child's count to the parent's count:
97102 */
97103- atomic64_add(child_val, &parent_event->child_count);
97104- atomic64_add(child_event->total_time_enabled,
97105+ atomic64_add_unchecked(child_val, &parent_event->child_count);
97106+ atomic64_add_unchecked(child_event->total_time_enabled,
97107 &parent_event->child_total_time_enabled);
97108- atomic64_add(child_event->total_time_running,
97109+ atomic64_add_unchecked(child_event->total_time_running,
97110 &parent_event->child_total_time_running);
97111
97112 /*
97113diff --git a/kernel/events/internal.h b/kernel/events/internal.h
97114index 569b2187..19940d9 100644
97115--- a/kernel/events/internal.h
97116+++ b/kernel/events/internal.h
97117@@ -81,10 +81,10 @@ static inline unsigned long perf_data_size(struct ring_buffer *rb)
97118 return rb->nr_pages << (PAGE_SHIFT + page_order(rb));
97119 }
97120
97121-#define DEFINE_OUTPUT_COPY(func_name, memcpy_func) \
97122+#define DEFINE_OUTPUT_COPY(func_name, memcpy_func, user) \
97123 static inline unsigned long \
97124 func_name(struct perf_output_handle *handle, \
97125- const void *buf, unsigned long len) \
97126+ const void user *buf, unsigned long len) \
97127 { \
97128 unsigned long size, written; \
97129 \
97130@@ -117,7 +117,7 @@ memcpy_common(void *dst, const void *src, unsigned long n)
97131 return 0;
97132 }
97133
97134-DEFINE_OUTPUT_COPY(__output_copy, memcpy_common)
97135+DEFINE_OUTPUT_COPY(__output_copy, memcpy_common, )
97136
97137 static inline unsigned long
97138 memcpy_skip(void *dst, const void *src, unsigned long n)
97139@@ -125,7 +125,7 @@ memcpy_skip(void *dst, const void *src, unsigned long n)
97140 return 0;
97141 }
97142
97143-DEFINE_OUTPUT_COPY(__output_skip, memcpy_skip)
97144+DEFINE_OUTPUT_COPY(__output_skip, memcpy_skip, )
97145
97146 #ifndef arch_perf_out_copy_user
97147 #define arch_perf_out_copy_user arch_perf_out_copy_user
97148@@ -143,7 +143,7 @@ arch_perf_out_copy_user(void *dst, const void *src, unsigned long n)
97149 }
97150 #endif
97151
97152-DEFINE_OUTPUT_COPY(__output_copy_user, arch_perf_out_copy_user)
97153+DEFINE_OUTPUT_COPY(__output_copy_user, arch_perf_out_copy_user, __user)
97154
97155 /* Callchain handling */
97156 extern struct perf_callchain_entry *
97157diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c
97158index cb346f2..e4dc317 100644
97159--- a/kernel/events/uprobes.c
97160+++ b/kernel/events/uprobes.c
97161@@ -1670,7 +1670,7 @@ static int is_trap_at_addr(struct mm_struct *mm, unsigned long vaddr)
97162 {
97163 struct page *page;
97164 uprobe_opcode_t opcode;
97165- int result;
97166+ long result;
97167
97168 pagefault_disable();
97169 result = __copy_from_user_inatomic(&opcode, (void __user*)vaddr,
97170diff --git a/kernel/exit.c b/kernel/exit.c
97171index feff10b..f623dd5 100644
97172--- a/kernel/exit.c
97173+++ b/kernel/exit.c
97174@@ -171,6 +171,10 @@ void release_task(struct task_struct *p)
97175 struct task_struct *leader;
97176 int zap_leader;
97177 repeat:
97178+#ifdef CONFIG_NET
97179+ gr_del_task_from_ip_table(p);
97180+#endif
97181+
97182 /* don't need to get the RCU readlock here - the process is dead and
97183 * can't be modifying its own credentials. But shut RCU-lockdep up */
97184 rcu_read_lock();
97185@@ -656,6 +660,8 @@ void do_exit(long code)
97186 int group_dead;
97187 TASKS_RCU(int tasks_rcu_i);
97188
97189+ set_fs(USER_DS);
97190+
97191 profile_task_exit(tsk);
97192
97193 WARN_ON(blk_needs_flush_plug(tsk));
97194@@ -672,7 +678,6 @@ void do_exit(long code)
97195 * mm_release()->clear_child_tid() from writing to a user-controlled
97196 * kernel address.
97197 */
97198- set_fs(USER_DS);
97199
97200 ptrace_event(PTRACE_EVENT_EXIT, code);
97201
97202@@ -730,6 +735,9 @@ void do_exit(long code)
97203 tsk->exit_code = code;
97204 taskstats_exit(tsk, group_dead);
97205
97206+ gr_acl_handle_psacct(tsk, code);
97207+ gr_acl_handle_exit();
97208+
97209 exit_mm(tsk);
97210
97211 if (group_dead)
97212@@ -849,7 +857,7 @@ SYSCALL_DEFINE1(exit, int, error_code)
97213 * Take down every thread in the group. This is called by fatal signals
97214 * as well as by sys_exit_group (below).
97215 */
97216-void
97217+__noreturn void
97218 do_group_exit(int exit_code)
97219 {
97220 struct signal_struct *sig = current->signal;
97221diff --git a/kernel/fork.c b/kernel/fork.c
97222index cf65139..704476e 100644
97223--- a/kernel/fork.c
97224+++ b/kernel/fork.c
97225@@ -177,12 +177,54 @@ static void free_thread_info(struct thread_info *ti)
97226 void thread_info_cache_init(void)
97227 {
97228 thread_info_cache = kmem_cache_create("thread_info", THREAD_SIZE,
97229- THREAD_SIZE, 0, NULL);
97230+ THREAD_SIZE, SLAB_USERCOPY, NULL);
97231 BUG_ON(thread_info_cache == NULL);
97232 }
97233 # endif
97234 #endif
97235
97236+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
97237+static inline struct thread_info *gr_alloc_thread_info_node(struct task_struct *tsk,
97238+ int node, void **lowmem_stack)
97239+{
97240+ struct page *pages[THREAD_SIZE / PAGE_SIZE];
97241+ void *ret = NULL;
97242+ unsigned int i;
97243+
97244+ *lowmem_stack = alloc_thread_info_node(tsk, node);
97245+ if (*lowmem_stack == NULL)
97246+ goto out;
97247+
97248+ for (i = 0; i < THREAD_SIZE / PAGE_SIZE; i++)
97249+ pages[i] = virt_to_page(*lowmem_stack + (i * PAGE_SIZE));
97250+
97251+ /* use VM_IOREMAP to gain THREAD_SIZE alignment */
97252+ ret = vmap(pages, THREAD_SIZE / PAGE_SIZE, VM_IOREMAP, PAGE_KERNEL);
97253+ if (ret == NULL) {
97254+ free_thread_info(*lowmem_stack);
97255+ *lowmem_stack = NULL;
97256+ }
97257+
97258+out:
97259+ return ret;
97260+}
97261+
97262+static inline void gr_free_thread_info(struct task_struct *tsk, struct thread_info *ti)
97263+{
97264+ unmap_process_stacks(tsk);
97265+}
97266+#else
97267+static inline struct thread_info *gr_alloc_thread_info_node(struct task_struct *tsk,
97268+ int node, void **lowmem_stack)
97269+{
97270+ return alloc_thread_info_node(tsk, node);
97271+}
97272+static inline void gr_free_thread_info(struct task_struct *tsk, struct thread_info *ti)
97273+{
97274+ free_thread_info(ti);
97275+}
97276+#endif
97277+
97278 /* SLAB cache for signal_struct structures (tsk->signal) */
97279 static struct kmem_cache *signal_cachep;
97280
97281@@ -201,18 +243,22 @@ struct kmem_cache *vm_area_cachep;
97282 /* SLAB cache for mm_struct structures (tsk->mm) */
97283 static struct kmem_cache *mm_cachep;
97284
97285-static void account_kernel_stack(struct thread_info *ti, int account)
97286+static void account_kernel_stack(struct task_struct *tsk, struct thread_info *ti, int account)
97287 {
97288+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
97289+ struct zone *zone = page_zone(virt_to_page(tsk->lowmem_stack));
97290+#else
97291 struct zone *zone = page_zone(virt_to_page(ti));
97292+#endif
97293
97294 mod_zone_page_state(zone, NR_KERNEL_STACK, account);
97295 }
97296
97297 void free_task(struct task_struct *tsk)
97298 {
97299- account_kernel_stack(tsk->stack, -1);
97300+ account_kernel_stack(tsk, tsk->stack, -1);
97301 arch_release_thread_info(tsk->stack);
97302- free_thread_info(tsk->stack);
97303+ gr_free_thread_info(tsk, tsk->stack);
97304 rt_mutex_debug_task_free(tsk);
97305 ftrace_graph_exit_task(tsk);
97306 put_seccomp_filter(tsk);
97307@@ -306,6 +352,7 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
97308 {
97309 struct task_struct *tsk;
97310 struct thread_info *ti;
97311+ void *lowmem_stack;
97312 int node = tsk_fork_get_node(orig);
97313 int err;
97314
97315@@ -313,7 +360,7 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
97316 if (!tsk)
97317 return NULL;
97318
97319- ti = alloc_thread_info_node(tsk, node);
97320+ ti = gr_alloc_thread_info_node(tsk, node, &lowmem_stack);
97321 if (!ti)
97322 goto free_tsk;
97323
97324@@ -322,6 +369,9 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
97325 goto free_ti;
97326
97327 tsk->stack = ti;
97328+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
97329+ tsk->lowmem_stack = lowmem_stack;
97330+#endif
97331 #ifdef CONFIG_SECCOMP
97332 /*
97333 * We must handle setting up seccomp filters once we're under
97334@@ -338,7 +388,7 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
97335 set_task_stack_end_magic(tsk);
97336
97337 #ifdef CONFIG_CC_STACKPROTECTOR
97338- tsk->stack_canary = get_random_int();
97339+ tsk->stack_canary = pax_get_random_long();
97340 #endif
97341
97342 /*
97343@@ -352,24 +402,89 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
97344 tsk->splice_pipe = NULL;
97345 tsk->task_frag.page = NULL;
97346
97347- account_kernel_stack(ti, 1);
97348+ account_kernel_stack(tsk, ti, 1);
97349
97350 return tsk;
97351
97352 free_ti:
97353- free_thread_info(ti);
97354+ gr_free_thread_info(tsk, ti);
97355 free_tsk:
97356 free_task_struct(tsk);
97357 return NULL;
97358 }
97359
97360 #ifdef CONFIG_MMU
97361-static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
97362+static struct vm_area_struct *dup_vma(struct mm_struct *mm, struct mm_struct *oldmm, struct vm_area_struct *mpnt)
97363+{
97364+ struct vm_area_struct *tmp;
97365+ unsigned long charge;
97366+ struct file *file;
97367+ int retval;
97368+
97369+ charge = 0;
97370+ if (mpnt->vm_flags & VM_ACCOUNT) {
97371+ unsigned long len = vma_pages(mpnt);
97372+
97373+ if (security_vm_enough_memory_mm(oldmm, len)) /* sic */
97374+ goto fail_nomem;
97375+ charge = len;
97376+ }
97377+ tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
97378+ if (!tmp)
97379+ goto fail_nomem;
97380+ *tmp = *mpnt;
97381+ tmp->vm_mm = mm;
97382+ INIT_LIST_HEAD(&tmp->anon_vma_chain);
97383+ retval = vma_dup_policy(mpnt, tmp);
97384+ if (retval)
97385+ goto fail_nomem_policy;
97386+ if (anon_vma_fork(tmp, mpnt))
97387+ goto fail_nomem_anon_vma_fork;
97388+ tmp->vm_flags &= ~VM_LOCKED;
97389+ tmp->vm_next = tmp->vm_prev = NULL;
97390+ tmp->vm_mirror = NULL;
97391+ file = tmp->vm_file;
97392+ if (file) {
97393+ struct inode *inode = file_inode(file);
97394+ struct address_space *mapping = file->f_mapping;
97395+
97396+ get_file(file);
97397+ if (tmp->vm_flags & VM_DENYWRITE)
97398+ atomic_dec(&inode->i_writecount);
97399+ i_mmap_lock_write(mapping);
97400+ if (tmp->vm_flags & VM_SHARED)
97401+ atomic_inc(&mapping->i_mmap_writable);
97402+ flush_dcache_mmap_lock(mapping);
97403+ /* insert tmp into the share list, just after mpnt */
97404+ vma_interval_tree_insert_after(tmp, mpnt, &mapping->i_mmap);
97405+ flush_dcache_mmap_unlock(mapping);
97406+ i_mmap_unlock_write(mapping);
97407+ }
97408+
97409+ /*
97410+ * Clear hugetlb-related page reserves for children. This only
97411+ * affects MAP_PRIVATE mappings. Faults generated by the child
97412+ * are not guaranteed to succeed, even if read-only
97413+ */
97414+ if (is_vm_hugetlb_page(tmp))
97415+ reset_vma_resv_huge_pages(tmp);
97416+
97417+ return tmp;
97418+
97419+fail_nomem_anon_vma_fork:
97420+ mpol_put(vma_policy(tmp));
97421+fail_nomem_policy:
97422+ kmem_cache_free(vm_area_cachep, tmp);
97423+fail_nomem:
97424+ vm_unacct_memory(charge);
97425+ return NULL;
97426+}
97427+
97428+static __latent_entropy int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
97429 {
97430 struct vm_area_struct *mpnt, *tmp, *prev, **pprev;
97431 struct rb_node **rb_link, *rb_parent;
97432 int retval;
97433- unsigned long charge;
97434
97435 uprobe_start_dup_mmap();
97436 down_write(&oldmm->mmap_sem);
97437@@ -397,51 +512,15 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
97438
97439 prev = NULL;
97440 for (mpnt = oldmm->mmap; mpnt; mpnt = mpnt->vm_next) {
97441- struct file *file;
97442-
97443 if (mpnt->vm_flags & VM_DONTCOPY) {
97444 vm_stat_account(mm, mpnt->vm_flags, mpnt->vm_file,
97445 -vma_pages(mpnt));
97446 continue;
97447 }
97448- charge = 0;
97449- if (mpnt->vm_flags & VM_ACCOUNT) {
97450- unsigned long len = vma_pages(mpnt);
97451-
97452- if (security_vm_enough_memory_mm(oldmm, len)) /* sic */
97453- goto fail_nomem;
97454- charge = len;
97455- }
97456- tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
97457- if (!tmp)
97458- goto fail_nomem;
97459- *tmp = *mpnt;
97460- INIT_LIST_HEAD(&tmp->anon_vma_chain);
97461- retval = vma_dup_policy(mpnt, tmp);
97462- if (retval)
97463- goto fail_nomem_policy;
97464- tmp->vm_mm = mm;
97465- if (anon_vma_fork(tmp, mpnt))
97466- goto fail_nomem_anon_vma_fork;
97467- tmp->vm_flags &= ~VM_LOCKED;
97468- tmp->vm_next = tmp->vm_prev = NULL;
97469- file = tmp->vm_file;
97470- if (file) {
97471- struct inode *inode = file_inode(file);
97472- struct address_space *mapping = file->f_mapping;
97473-
97474- get_file(file);
97475- if (tmp->vm_flags & VM_DENYWRITE)
97476- atomic_dec(&inode->i_writecount);
97477- i_mmap_lock_write(mapping);
97478- if (tmp->vm_flags & VM_SHARED)
97479- atomic_inc(&mapping->i_mmap_writable);
97480- flush_dcache_mmap_lock(mapping);
97481- /* insert tmp into the share list, just after mpnt */
97482- vma_interval_tree_insert_after(tmp, mpnt,
97483- &mapping->i_mmap);
97484- flush_dcache_mmap_unlock(mapping);
97485- i_mmap_unlock_write(mapping);
97486+ tmp = dup_vma(mm, oldmm, mpnt);
97487+ if (!tmp) {
97488+ retval = -ENOMEM;
97489+ goto out;
97490 }
97491
97492 /*
97493@@ -473,6 +552,31 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
97494 if (retval)
97495 goto out;
97496 }
97497+
97498+#ifdef CONFIG_PAX_SEGMEXEC
97499+ if (oldmm->pax_flags & MF_PAX_SEGMEXEC) {
97500+ struct vm_area_struct *mpnt_m;
97501+
97502+ for (mpnt = oldmm->mmap, mpnt_m = mm->mmap; mpnt; mpnt = mpnt->vm_next, mpnt_m = mpnt_m->vm_next) {
97503+ BUG_ON(!mpnt_m || mpnt_m->vm_mirror || mpnt->vm_mm != oldmm || mpnt_m->vm_mm != mm);
97504+
97505+ if (!mpnt->vm_mirror)
97506+ continue;
97507+
97508+ if (mpnt->vm_end <= SEGMEXEC_TASK_SIZE) {
97509+ BUG_ON(mpnt->vm_mirror->vm_mirror != mpnt);
97510+ mpnt->vm_mirror = mpnt_m;
97511+ } else {
97512+ BUG_ON(mpnt->vm_mirror->vm_mirror == mpnt || mpnt->vm_mirror->vm_mirror->vm_mm != mm);
97513+ mpnt_m->vm_mirror = mpnt->vm_mirror->vm_mirror;
97514+ mpnt_m->vm_mirror->vm_mirror = mpnt_m;
97515+ mpnt->vm_mirror->vm_mirror = mpnt;
97516+ }
97517+ }
97518+ BUG_ON(mpnt_m);
97519+ }
97520+#endif
97521+
97522 /* a new mm has just been created */
97523 arch_dup_mmap(oldmm, mm);
97524 retval = 0;
97525@@ -482,14 +586,6 @@ out:
97526 up_write(&oldmm->mmap_sem);
97527 uprobe_end_dup_mmap();
97528 return retval;
97529-fail_nomem_anon_vma_fork:
97530- mpol_put(vma_policy(tmp));
97531-fail_nomem_policy:
97532- kmem_cache_free(vm_area_cachep, tmp);
97533-fail_nomem:
97534- retval = -ENOMEM;
97535- vm_unacct_memory(charge);
97536- goto out;
97537 }
97538
97539 static inline int mm_alloc_pgd(struct mm_struct *mm)
97540@@ -739,8 +835,8 @@ struct mm_struct *mm_access(struct task_struct *task, unsigned int mode)
97541 return ERR_PTR(err);
97542
97543 mm = get_task_mm(task);
97544- if (mm && mm != current->mm &&
97545- !ptrace_may_access(task, mode)) {
97546+ if (mm && ((mm != current->mm && !ptrace_may_access(task, mode)) ||
97547+ (mode == PTRACE_MODE_ATTACH && (gr_handle_proc_ptrace(task) || gr_acl_handle_procpidmem(task))))) {
97548 mmput(mm);
97549 mm = ERR_PTR(-EACCES);
97550 }
97551@@ -943,13 +1039,20 @@ static int copy_fs(unsigned long clone_flags, struct task_struct *tsk)
97552 spin_unlock(&fs->lock);
97553 return -EAGAIN;
97554 }
97555- fs->users++;
97556+ atomic_inc(&fs->users);
97557 spin_unlock(&fs->lock);
97558 return 0;
97559 }
97560 tsk->fs = copy_fs_struct(fs);
97561 if (!tsk->fs)
97562 return -ENOMEM;
97563+ /* Carry through gr_chroot_dentry and is_chrooted instead
97564+ of recomputing it here. Already copied when the task struct
97565+ is duplicated. This allows pivot_root to not be treated as
97566+ a chroot
97567+ */
97568+ //gr_set_chroot_entries(tsk, &tsk->fs->root);
97569+
97570 return 0;
97571 }
97572
97573@@ -1187,7 +1290,7 @@ init_task_pid(struct task_struct *task, enum pid_type type, struct pid *pid)
97574 * parts of the process environment (as per the clone
97575 * flags). The actual kick-off is left to the caller.
97576 */
97577-static struct task_struct *copy_process(unsigned long clone_flags,
97578+static __latent_entropy struct task_struct *copy_process(unsigned long clone_flags,
97579 unsigned long stack_start,
97580 unsigned long stack_size,
97581 int __user *child_tidptr,
97582@@ -1258,6 +1361,9 @@ static struct task_struct *copy_process(unsigned long clone_flags,
97583 DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled);
97584 #endif
97585 retval = -EAGAIN;
97586+
97587+ gr_learn_resource(p, RLIMIT_NPROC, atomic_read(&p->real_cred->user->processes), 0);
97588+
97589 if (atomic_read(&p->real_cred->user->processes) >=
97590 task_rlimit(p, RLIMIT_NPROC)) {
97591 if (p->real_cred->user != INIT_USER &&
97592@@ -1507,6 +1613,11 @@ static struct task_struct *copy_process(unsigned long clone_flags,
97593 goto bad_fork_free_pid;
97594 }
97595
97596+ /* synchronizes with gr_set_acls()
97597+ we need to call this past the point of no return for fork()
97598+ */
97599+ gr_copy_label(p);
97600+
97601 if (likely(p->pid)) {
97602 ptrace_init_task(p, (clone_flags & CLONE_PTRACE) || trace);
97603
97604@@ -1597,6 +1708,8 @@ bad_fork_cleanup_count:
97605 bad_fork_free:
97606 free_task(p);
97607 fork_out:
97608+ gr_log_forkfail(retval);
97609+
97610 return ERR_PTR(retval);
97611 }
97612
97613@@ -1658,6 +1771,7 @@ long do_fork(unsigned long clone_flags,
97614
97615 p = copy_process(clone_flags, stack_start, stack_size,
97616 child_tidptr, NULL, trace);
97617+ add_latent_entropy();
97618 /*
97619 * Do this prior waking up the new thread - the thread pointer
97620 * might get invalid after that point, if the thread exits quickly.
97621@@ -1674,6 +1788,8 @@ long do_fork(unsigned long clone_flags,
97622 if (clone_flags & CLONE_PARENT_SETTID)
97623 put_user(nr, parent_tidptr);
97624
97625+ gr_handle_brute_check();
97626+
97627 if (clone_flags & CLONE_VFORK) {
97628 p->vfork_done = &vfork;
97629 init_completion(&vfork);
97630@@ -1792,7 +1908,7 @@ void __init proc_caches_init(void)
97631 mm_cachep = kmem_cache_create("mm_struct",
97632 sizeof(struct mm_struct), ARCH_MIN_MMSTRUCT_ALIGN,
97633 SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_NOTRACK, NULL);
97634- vm_area_cachep = KMEM_CACHE(vm_area_struct, SLAB_PANIC);
97635+ vm_area_cachep = KMEM_CACHE(vm_area_struct, SLAB_PANIC | SLAB_NO_SANITIZE);
97636 mmap_init();
97637 nsproxy_cache_init();
97638 }
97639@@ -1832,7 +1948,7 @@ static int unshare_fs(unsigned long unshare_flags, struct fs_struct **new_fsp)
97640 return 0;
97641
97642 /* don't need lock here; in the worst case we'll do useless copy */
97643- if (fs->users == 1)
97644+ if (atomic_read(&fs->users) == 1)
97645 return 0;
97646
97647 *new_fsp = copy_fs_struct(fs);
97648@@ -1944,7 +2060,8 @@ SYSCALL_DEFINE1(unshare, unsigned long, unshare_flags)
97649 fs = current->fs;
97650 spin_lock(&fs->lock);
97651 current->fs = new_fs;
97652- if (--fs->users)
97653+ gr_set_chroot_entries(current, &current->fs->root);
97654+ if (atomic_dec_return(&fs->users))
97655 new_fs = NULL;
97656 else
97657 new_fs = fs;
97658diff --git a/kernel/futex.c b/kernel/futex.c
97659index 2a5e383..878bac6 100644
97660--- a/kernel/futex.c
97661+++ b/kernel/futex.c
97662@@ -201,7 +201,7 @@ struct futex_pi_state {
97663 atomic_t refcount;
97664
97665 union futex_key key;
97666-};
97667+} __randomize_layout;
97668
97669 /**
97670 * struct futex_q - The hashed futex queue entry, one per waiting task
97671@@ -235,7 +235,7 @@ struct futex_q {
97672 struct rt_mutex_waiter *rt_waiter;
97673 union futex_key *requeue_pi_key;
97674 u32 bitset;
97675-};
97676+} __randomize_layout;
97677
97678 static const struct futex_q futex_q_init = {
97679 /* list gets initialized in queue_me()*/
97680@@ -402,6 +402,11 @@ get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key, int rw)
97681 struct page *page, *page_head;
97682 int err, ro = 0;
97683
97684+#ifdef CONFIG_PAX_SEGMEXEC
97685+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && address >= SEGMEXEC_TASK_SIZE)
97686+ return -EFAULT;
97687+#endif
97688+
97689 /*
97690 * The futex address must be "naturally" aligned.
97691 */
97692@@ -601,7 +606,7 @@ static int cmpxchg_futex_value_locked(u32 *curval, u32 __user *uaddr,
97693
97694 static int get_futex_value_locked(u32 *dest, u32 __user *from)
97695 {
97696- int ret;
97697+ unsigned long ret;
97698
97699 pagefault_disable();
97700 ret = __copy_from_user_inatomic(dest, from, sizeof(u32));
97701@@ -3006,6 +3011,7 @@ static void __init futex_detect_cmpxchg(void)
97702 {
97703 #ifndef CONFIG_HAVE_FUTEX_CMPXCHG
97704 u32 curval;
97705+ mm_segment_t oldfs;
97706
97707 /*
97708 * This will fail and we want it. Some arch implementations do
97709@@ -3017,8 +3023,11 @@ static void __init futex_detect_cmpxchg(void)
97710 * implementation, the non-functional ones will return
97711 * -ENOSYS.
97712 */
97713+ oldfs = get_fs();
97714+ set_fs(USER_DS);
97715 if (cmpxchg_futex_value_locked(&curval, NULL, 0, 0) == -EFAULT)
97716 futex_cmpxchg_enabled = 1;
97717+ set_fs(oldfs);
97718 #endif
97719 }
97720
97721diff --git a/kernel/futex_compat.c b/kernel/futex_compat.c
97722index 55c8c93..9ba7ad6 100644
97723--- a/kernel/futex_compat.c
97724+++ b/kernel/futex_compat.c
97725@@ -32,7 +32,7 @@ fetch_robust_entry(compat_uptr_t *uentry, struct robust_list __user **entry,
97726 return 0;
97727 }
97728
97729-static void __user *futex_uaddr(struct robust_list __user *entry,
97730+static void __user __intentional_overflow(-1) *futex_uaddr(struct robust_list __user *entry,
97731 compat_long_t futex_offset)
97732 {
97733 compat_uptr_t base = ptr_to_compat(entry);
97734diff --git a/kernel/gcov/base.c b/kernel/gcov/base.c
97735index b358a80..fc25240 100644
97736--- a/kernel/gcov/base.c
97737+++ b/kernel/gcov/base.c
97738@@ -114,11 +114,6 @@ void gcov_enable_events(void)
97739 }
97740
97741 #ifdef CONFIG_MODULES
97742-static inline int within(void *addr, void *start, unsigned long size)
97743-{
97744- return ((addr >= start) && (addr < start + size));
97745-}
97746-
97747 /* Update list and generate events when modules are unloaded. */
97748 static int gcov_module_notifier(struct notifier_block *nb, unsigned long event,
97749 void *data)
97750@@ -133,7 +128,7 @@ static int gcov_module_notifier(struct notifier_block *nb, unsigned long event,
97751
97752 /* Remove entries located in module from linked list. */
97753 while ((info = gcov_info_next(info))) {
97754- if (within(info, mod->module_core, mod->core_size)) {
97755+ if (within_module_core_rw((unsigned long)info, mod)) {
97756 gcov_info_unlink(prev, info);
97757 if (gcov_events_enabled)
97758 gcov_event(GCOV_REMOVE, info);
97759diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
97760index 886d09e..c7ff4e5 100644
97761--- a/kernel/irq/manage.c
97762+++ b/kernel/irq/manage.c
97763@@ -874,7 +874,7 @@ static int irq_thread(void *data)
97764
97765 action_ret = handler_fn(desc, action);
97766 if (action_ret == IRQ_HANDLED)
97767- atomic_inc(&desc->threads_handled);
97768+ atomic_inc_unchecked(&desc->threads_handled);
97769
97770 wake_threads_waitq(desc);
97771 }
97772diff --git a/kernel/irq/spurious.c b/kernel/irq/spurious.c
97773index e2514b0..de3dfe0 100644
97774--- a/kernel/irq/spurious.c
97775+++ b/kernel/irq/spurious.c
97776@@ -337,7 +337,7 @@ void note_interrupt(unsigned int irq, struct irq_desc *desc,
97777 * count. We just care about the count being
97778 * different than the one we saw before.
97779 */
97780- handled = atomic_read(&desc->threads_handled);
97781+ handled = atomic_read_unchecked(&desc->threads_handled);
97782 handled |= SPURIOUS_DEFERRED;
97783 if (handled != desc->threads_handled_last) {
97784 action_ret = IRQ_HANDLED;
97785diff --git a/kernel/jump_label.c b/kernel/jump_label.c
97786index 9019f15..9a3c42e 100644
97787--- a/kernel/jump_label.c
97788+++ b/kernel/jump_label.c
97789@@ -14,6 +14,7 @@
97790 #include <linux/err.h>
97791 #include <linux/static_key.h>
97792 #include <linux/jump_label_ratelimit.h>
97793+#include <linux/mm.h>
97794
97795 #ifdef HAVE_JUMP_LABEL
97796
97797@@ -51,7 +52,9 @@ jump_label_sort_entries(struct jump_entry *start, struct jump_entry *stop)
97798
97799 size = (((unsigned long)stop - (unsigned long)start)
97800 / sizeof(struct jump_entry));
97801+ pax_open_kernel();
97802 sort(start, size, sizeof(struct jump_entry), jump_label_cmp, NULL);
97803+ pax_close_kernel();
97804 }
97805
97806 static void jump_label_update(struct static_key *key, int enable);
97807@@ -363,10 +366,12 @@ static void jump_label_invalidate_module_init(struct module *mod)
97808 struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
97809 struct jump_entry *iter;
97810
97811+ pax_open_kernel();
97812 for (iter = iter_start; iter < iter_stop; iter++) {
97813 if (within_module_init(iter->code, mod))
97814 iter->code = 0;
97815 }
97816+ pax_close_kernel();
97817 }
97818
97819 static int
97820diff --git a/kernel/kallsyms.c b/kernel/kallsyms.c
97821index 5c5987f..bc502b0 100644
97822--- a/kernel/kallsyms.c
97823+++ b/kernel/kallsyms.c
97824@@ -11,6 +11,9 @@
97825 * Changed the compression method from stem compression to "table lookup"
97826 * compression (see scripts/kallsyms.c for a more complete description)
97827 */
97828+#ifdef CONFIG_GRKERNSEC_HIDESYM
97829+#define __INCLUDED_BY_HIDESYM 1
97830+#endif
97831 #include <linux/kallsyms.h>
97832 #include <linux/module.h>
97833 #include <linux/init.h>
97834@@ -54,12 +57,33 @@ extern const unsigned long kallsyms_markers[] __weak;
97835
97836 static inline int is_kernel_inittext(unsigned long addr)
97837 {
97838+ if (system_state != SYSTEM_BOOTING)
97839+ return 0;
97840+
97841 if (addr >= (unsigned long)_sinittext
97842 && addr <= (unsigned long)_einittext)
97843 return 1;
97844 return 0;
97845 }
97846
97847+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
97848+#ifdef CONFIG_MODULES
97849+static inline int is_module_text(unsigned long addr)
97850+{
97851+ if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END)
97852+ return 1;
97853+
97854+ addr = ktla_ktva(addr);
97855+ return (unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END;
97856+}
97857+#else
97858+static inline int is_module_text(unsigned long addr)
97859+{
97860+ return 0;
97861+}
97862+#endif
97863+#endif
97864+
97865 static inline int is_kernel_text(unsigned long addr)
97866 {
97867 if ((addr >= (unsigned long)_stext && addr <= (unsigned long)_etext) ||
97868@@ -70,13 +94,28 @@ static inline int is_kernel_text(unsigned long addr)
97869
97870 static inline int is_kernel(unsigned long addr)
97871 {
97872+
97873+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
97874+ if (is_kernel_text(addr) || is_kernel_inittext(addr))
97875+ return 1;
97876+
97877+ if (ktla_ktva((unsigned long)_text) <= addr && addr < (unsigned long)_end)
97878+#else
97879 if (addr >= (unsigned long)_stext && addr <= (unsigned long)_end)
97880+#endif
97881+
97882 return 1;
97883 return in_gate_area_no_mm(addr);
97884 }
97885
97886 static int is_ksym_addr(unsigned long addr)
97887 {
97888+
97889+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
97890+ if (is_module_text(addr))
97891+ return 0;
97892+#endif
97893+
97894 if (all_var)
97895 return is_kernel(addr);
97896
97897@@ -481,7 +520,6 @@ static unsigned long get_ksymbol_core(struct kallsym_iter *iter)
97898
97899 static void reset_iter(struct kallsym_iter *iter, loff_t new_pos)
97900 {
97901- iter->name[0] = '\0';
97902 iter->nameoff = get_symbol_offset(new_pos);
97903 iter->pos = new_pos;
97904 }
97905@@ -529,6 +567,11 @@ static int s_show(struct seq_file *m, void *p)
97906 {
97907 struct kallsym_iter *iter = m->private;
97908
97909+#ifdef CONFIG_GRKERNSEC_HIDESYM
97910+ if (!uid_eq(current_uid(), GLOBAL_ROOT_UID))
97911+ return 0;
97912+#endif
97913+
97914 /* Some debugging symbols have no name. Ignore them. */
97915 if (!iter->name[0])
97916 return 0;
97917@@ -542,6 +585,7 @@ static int s_show(struct seq_file *m, void *p)
97918 */
97919 type = iter->exported ? toupper(iter->type) :
97920 tolower(iter->type);
97921+
97922 seq_printf(m, "%pK %c %s\t[%s]\n", (void *)iter->value,
97923 type, iter->name, iter->module_name);
97924 } else
97925diff --git a/kernel/kcmp.c b/kernel/kcmp.c
97926index 0aa69ea..a7fcafb 100644
97927--- a/kernel/kcmp.c
97928+++ b/kernel/kcmp.c
97929@@ -100,6 +100,10 @@ SYSCALL_DEFINE5(kcmp, pid_t, pid1, pid_t, pid2, int, type,
97930 struct task_struct *task1, *task2;
97931 int ret;
97932
97933+#ifdef CONFIG_GRKERNSEC
97934+ return -ENOSYS;
97935+#endif
97936+
97937 rcu_read_lock();
97938
97939 /*
97940diff --git a/kernel/kexec.c b/kernel/kexec.c
97941index 38c25b1..12b3f69 100644
97942--- a/kernel/kexec.c
97943+++ b/kernel/kexec.c
97944@@ -1348,7 +1348,8 @@ COMPAT_SYSCALL_DEFINE4(kexec_load, compat_ulong_t, entry,
97945 compat_ulong_t, flags)
97946 {
97947 struct compat_kexec_segment in;
97948- struct kexec_segment out, __user *ksegments;
97949+ struct kexec_segment out;
97950+ struct kexec_segment __user *ksegments;
97951 unsigned long i, result;
97952
97953 /* Don't allow clients that don't understand the native
97954diff --git a/kernel/kmod.c b/kernel/kmod.c
97955index 2777f40..a689506 100644
97956--- a/kernel/kmod.c
97957+++ b/kernel/kmod.c
97958@@ -68,7 +68,7 @@ static void free_modprobe_argv(struct subprocess_info *info)
97959 kfree(info->argv);
97960 }
97961
97962-static int call_modprobe(char *module_name, int wait)
97963+static int call_modprobe(char *module_name, char *module_param, int wait)
97964 {
97965 struct subprocess_info *info;
97966 static char *envp[] = {
97967@@ -78,7 +78,7 @@ static int call_modprobe(char *module_name, int wait)
97968 NULL
97969 };
97970
97971- char **argv = kmalloc(sizeof(char *[5]), GFP_KERNEL);
97972+ char **argv = kmalloc(sizeof(char *[6]), GFP_KERNEL);
97973 if (!argv)
97974 goto out;
97975
97976@@ -90,7 +90,8 @@ static int call_modprobe(char *module_name, int wait)
97977 argv[1] = "-q";
97978 argv[2] = "--";
97979 argv[3] = module_name; /* check free_modprobe_argv() */
97980- argv[4] = NULL;
97981+ argv[4] = module_param;
97982+ argv[5] = NULL;
97983
97984 info = call_usermodehelper_setup(modprobe_path, argv, envp, GFP_KERNEL,
97985 NULL, free_modprobe_argv, NULL);
97986@@ -122,9 +123,8 @@ out:
97987 * If module auto-loading support is disabled then this function
97988 * becomes a no-operation.
97989 */
97990-int __request_module(bool wait, const char *fmt, ...)
97991+static int ____request_module(bool wait, char *module_param, const char *fmt, va_list ap)
97992 {
97993- va_list args;
97994 char module_name[MODULE_NAME_LEN];
97995 unsigned int max_modprobes;
97996 int ret;
97997@@ -143,9 +143,7 @@ int __request_module(bool wait, const char *fmt, ...)
97998 if (!modprobe_path[0])
97999 return 0;
98000
98001- va_start(args, fmt);
98002- ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, args);
98003- va_end(args);
98004+ ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, ap);
98005 if (ret >= MODULE_NAME_LEN)
98006 return -ENAMETOOLONG;
98007
98008@@ -153,6 +151,20 @@ int __request_module(bool wait, const char *fmt, ...)
98009 if (ret)
98010 return ret;
98011
98012+#ifdef CONFIG_GRKERNSEC_MODHARDEN
98013+ if (uid_eq(current_uid(), GLOBAL_ROOT_UID)) {
98014+ /* hack to workaround consolekit/udisks stupidity */
98015+ read_lock(&tasklist_lock);
98016+ if (!strcmp(current->comm, "mount") &&
98017+ current->real_parent && !strncmp(current->real_parent->comm, "udisk", 5)) {
98018+ read_unlock(&tasklist_lock);
98019+ printk(KERN_ALERT "grsec: denied attempt to auto-load fs module %.64s by udisks\n", module_name);
98020+ return -EPERM;
98021+ }
98022+ read_unlock(&tasklist_lock);
98023+ }
98024+#endif
98025+
98026 /* If modprobe needs a service that is in a module, we get a recursive
98027 * loop. Limit the number of running kmod threads to max_threads/2 or
98028 * MAX_KMOD_CONCURRENT, whichever is the smaller. A cleaner method
98029@@ -181,16 +193,61 @@ int __request_module(bool wait, const char *fmt, ...)
98030
98031 trace_module_request(module_name, wait, _RET_IP_);
98032
98033- ret = call_modprobe(module_name, wait ? UMH_WAIT_PROC : UMH_WAIT_EXEC);
98034+ ret = call_modprobe(module_name, module_param, wait ? UMH_WAIT_PROC : UMH_WAIT_EXEC);
98035
98036 atomic_dec(&kmod_concurrent);
98037 return ret;
98038 }
98039+
98040+int ___request_module(bool wait, char *module_param, const char *fmt, ...)
98041+{
98042+ va_list args;
98043+ int ret;
98044+
98045+ va_start(args, fmt);
98046+ ret = ____request_module(wait, module_param, fmt, args);
98047+ va_end(args);
98048+
98049+ return ret;
98050+}
98051+
98052+int __request_module(bool wait, const char *fmt, ...)
98053+{
98054+ va_list args;
98055+ int ret;
98056+
98057+#ifdef CONFIG_GRKERNSEC_MODHARDEN
98058+ if (!uid_eq(current_uid(), GLOBAL_ROOT_UID)) {
98059+ char module_param[MODULE_NAME_LEN];
98060+
98061+ memset(module_param, 0, sizeof(module_param));
98062+
98063+ snprintf(module_param, sizeof(module_param) - 1, "grsec_modharden_normal%u_", GR_GLOBAL_UID(current_uid()));
98064+
98065+ va_start(args, fmt);
98066+ ret = ____request_module(wait, module_param, fmt, args);
98067+ va_end(args);
98068+
98069+ return ret;
98070+ }
98071+#endif
98072+
98073+ va_start(args, fmt);
98074+ ret = ____request_module(wait, NULL, fmt, args);
98075+ va_end(args);
98076+
98077+ return ret;
98078+}
98079+
98080 EXPORT_SYMBOL(__request_module);
98081 #endif /* CONFIG_MODULES */
98082
98083 static void call_usermodehelper_freeinfo(struct subprocess_info *info)
98084 {
98085+#ifdef CONFIG_GRKERNSEC
98086+ kfree(info->path);
98087+ info->path = info->origpath;
98088+#endif
98089 if (info->cleanup)
98090 (*info->cleanup)(info);
98091 kfree(info);
98092@@ -232,6 +289,21 @@ static int ____call_usermodehelper(void *data)
98093 */
98094 set_user_nice(current, 0);
98095
98096+#ifdef CONFIG_GRKERNSEC
98097+ /* this is race-free as far as userland is concerned as we copied
98098+ out the path to be used prior to this point and are now operating
98099+ on that copy
98100+ */
98101+ if ((strncmp(sub_info->path, "/sbin/", 6) && strncmp(sub_info->path, "/usr/lib/", 9) &&
98102+ strncmp(sub_info->path, "/lib/", 5) && strncmp(sub_info->path, "/lib64/", 7) &&
98103+ strncmp(sub_info->path, "/usr/libexec/", 13) && strncmp(sub_info->path, "/usr/bin/", 9) &&
98104+ strcmp(sub_info->path, "/usr/share/apport/apport")) || strstr(sub_info->path, "..")) {
98105+ printk(KERN_ALERT "grsec: denied exec of usermode helper binary %.950s located outside of permitted system paths\n", sub_info->path);
98106+ retval = -EPERM;
98107+ goto out;
98108+ }
98109+#endif
98110+
98111 retval = -ENOMEM;
98112 new = prepare_kernel_cred(current);
98113 if (!new)
98114@@ -254,8 +326,8 @@ static int ____call_usermodehelper(void *data)
98115 commit_creds(new);
98116
98117 retval = do_execve(getname_kernel(sub_info->path),
98118- (const char __user *const __user *)sub_info->argv,
98119- (const char __user *const __user *)sub_info->envp);
98120+ (const char __user *const __force_user *)sub_info->argv,
98121+ (const char __user *const __force_user *)sub_info->envp);
98122 out:
98123 sub_info->retval = retval;
98124 /* wait_for_helper() will call umh_complete if UHM_WAIT_PROC. */
98125@@ -288,7 +360,7 @@ static int wait_for_helper(void *data)
98126 *
98127 * Thus the __user pointer cast is valid here.
98128 */
98129- sys_wait4(pid, (int __user *)&ret, 0, NULL);
98130+ sys_wait4(pid, (int __force_user *)&ret, 0, NULL);
98131
98132 /*
98133 * If ret is 0, either ____call_usermodehelper failed and the
98134@@ -510,7 +582,12 @@ struct subprocess_info *call_usermodehelper_setup(char *path, char **argv,
98135 goto out;
98136
98137 INIT_WORK(&sub_info->work, __call_usermodehelper);
98138+#ifdef CONFIG_GRKERNSEC
98139+ sub_info->origpath = path;
98140+ sub_info->path = kstrdup(path, gfp_mask);
98141+#else
98142 sub_info->path = path;
98143+#endif
98144 sub_info->argv = argv;
98145 sub_info->envp = envp;
98146
98147@@ -612,7 +689,7 @@ EXPORT_SYMBOL(call_usermodehelper);
98148 static int proc_cap_handler(struct ctl_table *table, int write,
98149 void __user *buffer, size_t *lenp, loff_t *ppos)
98150 {
98151- struct ctl_table t;
98152+ ctl_table_no_const t;
98153 unsigned long cap_array[_KERNEL_CAPABILITY_U32S];
98154 kernel_cap_t new_cap;
98155 int err, i;
98156diff --git a/kernel/kprobes.c b/kernel/kprobes.c
98157index c90e417..e6c515d 100644
98158--- a/kernel/kprobes.c
98159+++ b/kernel/kprobes.c
98160@@ -31,6 +31,9 @@
98161 * <jkenisto@us.ibm.com> and Prasanna S Panchamukhi
98162 * <prasanna@in.ibm.com> added function-return probes.
98163 */
98164+#ifdef CONFIG_GRKERNSEC_HIDESYM
98165+#define __INCLUDED_BY_HIDESYM 1
98166+#endif
98167 #include <linux/kprobes.h>
98168 #include <linux/hash.h>
98169 #include <linux/init.h>
98170@@ -122,12 +125,12 @@ enum kprobe_slot_state {
98171
98172 static void *alloc_insn_page(void)
98173 {
98174- return module_alloc(PAGE_SIZE);
98175+ return module_alloc_exec(PAGE_SIZE);
98176 }
98177
98178 static void free_insn_page(void *page)
98179 {
98180- module_memfree(page);
98181+ module_memfree_exec(page);
98182 }
98183
98184 struct kprobe_insn_cache kprobe_insn_slots = {
98185@@ -2198,11 +2201,11 @@ static void report_probe(struct seq_file *pi, struct kprobe *p,
98186 kprobe_type = "k";
98187
98188 if (sym)
98189- seq_printf(pi, "%p %s %s+0x%x %s ",
98190+ seq_printf(pi, "%pK %s %s+0x%x %s ",
98191 p->addr, kprobe_type, sym, offset,
98192 (modname ? modname : " "));
98193 else
98194- seq_printf(pi, "%p %s %p ",
98195+ seq_printf(pi, "%pK %s %pK ",
98196 p->addr, kprobe_type, p->addr);
98197
98198 if (!pp)
98199diff --git a/kernel/ksysfs.c b/kernel/ksysfs.c
98200index 6683cce..daf8999 100644
98201--- a/kernel/ksysfs.c
98202+++ b/kernel/ksysfs.c
98203@@ -50,6 +50,8 @@ static ssize_t uevent_helper_store(struct kobject *kobj,
98204 {
98205 if (count+1 > UEVENT_HELPER_PATH_LEN)
98206 return -ENOENT;
98207+ if (!capable(CAP_SYS_ADMIN))
98208+ return -EPERM;
98209 memcpy(uevent_helper, buf, count);
98210 uevent_helper[count] = '\0';
98211 if (count && uevent_helper[count-1] == '\n')
98212@@ -176,7 +178,7 @@ static ssize_t notes_read(struct file *filp, struct kobject *kobj,
98213 return count;
98214 }
98215
98216-static struct bin_attribute notes_attr = {
98217+static bin_attribute_no_const notes_attr __read_only = {
98218 .attr = {
98219 .name = "notes",
98220 .mode = S_IRUGO,
98221diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
98222index ba77ab5..d6a3e20 100644
98223--- a/kernel/locking/lockdep.c
98224+++ b/kernel/locking/lockdep.c
98225@@ -599,6 +599,10 @@ static int static_obj(void *obj)
98226 end = (unsigned long) &_end,
98227 addr = (unsigned long) obj;
98228
98229+#ifdef CONFIG_PAX_KERNEXEC
98230+ start = ktla_ktva(start);
98231+#endif
98232+
98233 /*
98234 * static variable?
98235 */
98236@@ -743,6 +747,7 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
98237 if (!static_obj(lock->key)) {
98238 debug_locks_off();
98239 printk("INFO: trying to register non-static key.\n");
98240+ printk("lock:%pS key:%pS.\n", lock, lock->key);
98241 printk("the code is fine but needs lockdep annotation.\n");
98242 printk("turning off the locking correctness validator.\n");
98243 dump_stack();
98244@@ -3088,7 +3093,7 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
98245 if (!class)
98246 return 0;
98247 }
98248- atomic_inc((atomic_t *)&class->ops);
98249+ atomic_long_inc_unchecked((atomic_long_unchecked_t *)&class->ops);
98250 if (very_verbose(class)) {
98251 printk("\nacquire class [%p] %s", class->key, class->name);
98252 if (class->name_version > 1)
98253diff --git a/kernel/locking/lockdep_proc.c b/kernel/locking/lockdep_proc.c
98254index ef43ac4..2720dfa 100644
98255--- a/kernel/locking/lockdep_proc.c
98256+++ b/kernel/locking/lockdep_proc.c
98257@@ -65,7 +65,7 @@ static int l_show(struct seq_file *m, void *v)
98258 return 0;
98259 }
98260
98261- seq_printf(m, "%p", class->key);
98262+ seq_printf(m, "%pK", class->key);
98263 #ifdef CONFIG_DEBUG_LOCKDEP
98264 seq_printf(m, " OPS:%8ld", class->ops);
98265 #endif
98266@@ -83,7 +83,7 @@ static int l_show(struct seq_file *m, void *v)
98267
98268 list_for_each_entry(entry, &class->locks_after, entry) {
98269 if (entry->distance == 1) {
98270- seq_printf(m, " -> [%p] ", entry->class->key);
98271+ seq_printf(m, " -> [%pK] ", entry->class->key);
98272 print_name(m, entry->class);
98273 seq_puts(m, "\n");
98274 }
98275@@ -152,7 +152,7 @@ static int lc_show(struct seq_file *m, void *v)
98276 if (!class->key)
98277 continue;
98278
98279- seq_printf(m, "[%p] ", class->key);
98280+ seq_printf(m, "[%pK] ", class->key);
98281 print_name(m, class);
98282 seq_puts(m, "\n");
98283 }
98284@@ -496,7 +496,7 @@ static void seq_stats(struct seq_file *m, struct lock_stat_data *data)
98285 if (!i)
98286 seq_line(m, '-', 40-namelen, namelen);
98287
98288- snprintf(ip, sizeof(ip), "[<%p>]",
98289+ snprintf(ip, sizeof(ip), "[<%pK>]",
98290 (void *)class->contention_point[i]);
98291 seq_printf(m, "%40s %14lu %29s %pS\n",
98292 name, stats->contention_point[i],
98293@@ -511,7 +511,7 @@ static void seq_stats(struct seq_file *m, struct lock_stat_data *data)
98294 if (!i)
98295 seq_line(m, '-', 40-namelen, namelen);
98296
98297- snprintf(ip, sizeof(ip), "[<%p>]",
98298+ snprintf(ip, sizeof(ip), "[<%pK>]",
98299 (void *)class->contending_point[i]);
98300 seq_printf(m, "%40s %14lu %29s %pS\n",
98301 name, stats->contending_point[i],
98302diff --git a/kernel/locking/mcs_spinlock.h b/kernel/locking/mcs_spinlock.h
98303index d1fe2ba..180cd65e 100644
98304--- a/kernel/locking/mcs_spinlock.h
98305+++ b/kernel/locking/mcs_spinlock.h
98306@@ -78,7 +78,7 @@ void mcs_spin_lock(struct mcs_spinlock **lock, struct mcs_spinlock *node)
98307 */
98308 return;
98309 }
98310- ACCESS_ONCE(prev->next) = node;
98311+ ACCESS_ONCE_RW(prev->next) = node;
98312
98313 /* Wait until the lock holder passes the lock down. */
98314 arch_mcs_spin_lock_contended(&node->locked);
98315diff --git a/kernel/locking/mutex-debug.c b/kernel/locking/mutex-debug.c
98316index 3ef3736..9c951fa 100644
98317--- a/kernel/locking/mutex-debug.c
98318+++ b/kernel/locking/mutex-debug.c
98319@@ -49,21 +49,21 @@ void debug_mutex_free_waiter(struct mutex_waiter *waiter)
98320 }
98321
98322 void debug_mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
98323- struct thread_info *ti)
98324+ struct task_struct *task)
98325 {
98326 SMP_DEBUG_LOCKS_WARN_ON(!spin_is_locked(&lock->wait_lock));
98327
98328 /* Mark the current thread as blocked on the lock: */
98329- ti->task->blocked_on = waiter;
98330+ task->blocked_on = waiter;
98331 }
98332
98333 void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
98334- struct thread_info *ti)
98335+ struct task_struct *task)
98336 {
98337 DEBUG_LOCKS_WARN_ON(list_empty(&waiter->list));
98338- DEBUG_LOCKS_WARN_ON(waiter->task != ti->task);
98339- DEBUG_LOCKS_WARN_ON(ti->task->blocked_on != waiter);
98340- ti->task->blocked_on = NULL;
98341+ DEBUG_LOCKS_WARN_ON(waiter->task != task);
98342+ DEBUG_LOCKS_WARN_ON(task->blocked_on != waiter);
98343+ task->blocked_on = NULL;
98344
98345 list_del_init(&waiter->list);
98346 waiter->task = NULL;
98347diff --git a/kernel/locking/mutex-debug.h b/kernel/locking/mutex-debug.h
98348index 0799fd3..d06ae3b 100644
98349--- a/kernel/locking/mutex-debug.h
98350+++ b/kernel/locking/mutex-debug.h
98351@@ -20,9 +20,9 @@ extern void debug_mutex_wake_waiter(struct mutex *lock,
98352 extern void debug_mutex_free_waiter(struct mutex_waiter *waiter);
98353 extern void debug_mutex_add_waiter(struct mutex *lock,
98354 struct mutex_waiter *waiter,
98355- struct thread_info *ti);
98356+ struct task_struct *task);
98357 extern void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
98358- struct thread_info *ti);
98359+ struct task_struct *task);
98360 extern void debug_mutex_unlock(struct mutex *lock);
98361 extern void debug_mutex_init(struct mutex *lock, const char *name,
98362 struct lock_class_key *key);
98363diff --git a/kernel/locking/mutex.c b/kernel/locking/mutex.c
98364index 94674e5..de4966f 100644
98365--- a/kernel/locking/mutex.c
98366+++ b/kernel/locking/mutex.c
98367@@ -542,7 +542,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
98368 goto skip_wait;
98369
98370 debug_mutex_lock_common(lock, &waiter);
98371- debug_mutex_add_waiter(lock, &waiter, task_thread_info(task));
98372+ debug_mutex_add_waiter(lock, &waiter, task);
98373
98374 /* add waiting tasks to the end of the waitqueue (FIFO): */
98375 list_add_tail(&waiter.list, &lock->wait_list);
98376@@ -589,7 +589,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
98377 }
98378 __set_task_state(task, TASK_RUNNING);
98379
98380- mutex_remove_waiter(lock, &waiter, current_thread_info());
98381+ mutex_remove_waiter(lock, &waiter, task);
98382 /* set it to 0 if there are no waiters left: */
98383 if (likely(list_empty(&lock->wait_list)))
98384 atomic_set(&lock->count, 0);
98385@@ -610,7 +610,7 @@ skip_wait:
98386 return 0;
98387
98388 err:
98389- mutex_remove_waiter(lock, &waiter, task_thread_info(task));
98390+ mutex_remove_waiter(lock, &waiter, task);
98391 spin_unlock_mutex(&lock->wait_lock, flags);
98392 debug_mutex_free_waiter(&waiter);
98393 mutex_release(&lock->dep_map, 1, ip);
98394diff --git a/kernel/locking/osq_lock.c b/kernel/locking/osq_lock.c
98395index c112d00..1946ad9 100644
98396--- a/kernel/locking/osq_lock.c
98397+++ b/kernel/locking/osq_lock.c
98398@@ -98,7 +98,7 @@ bool osq_lock(struct optimistic_spin_queue *lock)
98399
98400 prev = decode_cpu(old);
98401 node->prev = prev;
98402- ACCESS_ONCE(prev->next) = node;
98403+ ACCESS_ONCE_RW(prev->next) = node;
98404
98405 /*
98406 * Normally @prev is untouchable after the above store; because at that
98407@@ -170,8 +170,8 @@ unqueue:
98408 * it will wait in Step-A.
98409 */
98410
98411- ACCESS_ONCE(next->prev) = prev;
98412- ACCESS_ONCE(prev->next) = next;
98413+ ACCESS_ONCE_RW(next->prev) = prev;
98414+ ACCESS_ONCE_RW(prev->next) = next;
98415
98416 return false;
98417 }
98418@@ -193,11 +193,11 @@ void osq_unlock(struct optimistic_spin_queue *lock)
98419 node = this_cpu_ptr(&osq_node);
98420 next = xchg(&node->next, NULL);
98421 if (next) {
98422- ACCESS_ONCE(next->locked) = 1;
98423+ ACCESS_ONCE_RW(next->locked) = 1;
98424 return;
98425 }
98426
98427 next = osq_wait_next(lock, node, NULL);
98428 if (next)
98429- ACCESS_ONCE(next->locked) = 1;
98430+ ACCESS_ONCE_RW(next->locked) = 1;
98431 }
98432diff --git a/kernel/locking/rtmutex-tester.c b/kernel/locking/rtmutex-tester.c
98433index 1d96dd0..994ff19 100644
98434--- a/kernel/locking/rtmutex-tester.c
98435+++ b/kernel/locking/rtmutex-tester.c
98436@@ -22,7 +22,7 @@
98437 #define MAX_RT_TEST_MUTEXES 8
98438
98439 static spinlock_t rttest_lock;
98440-static atomic_t rttest_event;
98441+static atomic_unchecked_t rttest_event;
98442
98443 struct test_thread_data {
98444 int opcode;
98445@@ -63,7 +63,7 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
98446
98447 case RTTEST_LOCKCONT:
98448 td->mutexes[td->opdata] = 1;
98449- td->event = atomic_add_return(1, &rttest_event);
98450+ td->event = atomic_add_return_unchecked(1, &rttest_event);
98451 return 0;
98452
98453 case RTTEST_RESET:
98454@@ -76,7 +76,7 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
98455 return 0;
98456
98457 case RTTEST_RESETEVENT:
98458- atomic_set(&rttest_event, 0);
98459+ atomic_set_unchecked(&rttest_event, 0);
98460 return 0;
98461
98462 default:
98463@@ -93,9 +93,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
98464 return ret;
98465
98466 td->mutexes[id] = 1;
98467- td->event = atomic_add_return(1, &rttest_event);
98468+ td->event = atomic_add_return_unchecked(1, &rttest_event);
98469 rt_mutex_lock(&mutexes[id]);
98470- td->event = atomic_add_return(1, &rttest_event);
98471+ td->event = atomic_add_return_unchecked(1, &rttest_event);
98472 td->mutexes[id] = 4;
98473 return 0;
98474
98475@@ -106,9 +106,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
98476 return ret;
98477
98478 td->mutexes[id] = 1;
98479- td->event = atomic_add_return(1, &rttest_event);
98480+ td->event = atomic_add_return_unchecked(1, &rttest_event);
98481 ret = rt_mutex_lock_interruptible(&mutexes[id], 0);
98482- td->event = atomic_add_return(1, &rttest_event);
98483+ td->event = atomic_add_return_unchecked(1, &rttest_event);
98484 td->mutexes[id] = ret ? 0 : 4;
98485 return ret ? -EINTR : 0;
98486
98487@@ -117,9 +117,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
98488 if (id < 0 || id >= MAX_RT_TEST_MUTEXES || td->mutexes[id] != 4)
98489 return ret;
98490
98491- td->event = atomic_add_return(1, &rttest_event);
98492+ td->event = atomic_add_return_unchecked(1, &rttest_event);
98493 rt_mutex_unlock(&mutexes[id]);
98494- td->event = atomic_add_return(1, &rttest_event);
98495+ td->event = atomic_add_return_unchecked(1, &rttest_event);
98496 td->mutexes[id] = 0;
98497 return 0;
98498
98499@@ -166,7 +166,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
98500 break;
98501
98502 td->mutexes[dat] = 2;
98503- td->event = atomic_add_return(1, &rttest_event);
98504+ td->event = atomic_add_return_unchecked(1, &rttest_event);
98505 break;
98506
98507 default:
98508@@ -186,7 +186,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
98509 return;
98510
98511 td->mutexes[dat] = 3;
98512- td->event = atomic_add_return(1, &rttest_event);
98513+ td->event = atomic_add_return_unchecked(1, &rttest_event);
98514 break;
98515
98516 case RTTEST_LOCKNOWAIT:
98517@@ -198,7 +198,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
98518 return;
98519
98520 td->mutexes[dat] = 1;
98521- td->event = atomic_add_return(1, &rttest_event);
98522+ td->event = atomic_add_return_unchecked(1, &rttest_event);
98523 return;
98524
98525 default:
98526diff --git a/kernel/module.c b/kernel/module.c
98527index ec53f59..67d9655 100644
98528--- a/kernel/module.c
98529+++ b/kernel/module.c
98530@@ -59,6 +59,7 @@
98531 #include <linux/jump_label.h>
98532 #include <linux/pfn.h>
98533 #include <linux/bsearch.h>
98534+#include <linux/grsecurity.h>
98535 #include <uapi/linux/module.h>
98536 #include "module-internal.h"
98537
98538@@ -155,7 +156,8 @@ static BLOCKING_NOTIFIER_HEAD(module_notify_list);
98539
98540 /* Bounds of module allocation, for speeding __module_address.
98541 * Protected by module_mutex. */
98542-static unsigned long module_addr_min = -1UL, module_addr_max = 0;
98543+static unsigned long module_addr_min_rw = -1UL, module_addr_max_rw = 0;
98544+static unsigned long module_addr_min_rx = -1UL, module_addr_max_rx = 0;
98545
98546 int register_module_notifier(struct notifier_block *nb)
98547 {
98548@@ -322,7 +324,7 @@ bool each_symbol_section(bool (*fn)(const struct symsearch *arr,
98549 return true;
98550
98551 list_for_each_entry_rcu(mod, &modules, list) {
98552- struct symsearch arr[] = {
98553+ struct symsearch modarr[] = {
98554 { mod->syms, mod->syms + mod->num_syms, mod->crcs,
98555 NOT_GPL_ONLY, false },
98556 { mod->gpl_syms, mod->gpl_syms + mod->num_gpl_syms,
98557@@ -347,7 +349,7 @@ bool each_symbol_section(bool (*fn)(const struct symsearch *arr,
98558 if (mod->state == MODULE_STATE_UNFORMED)
98559 continue;
98560
98561- if (each_symbol_in_section(arr, ARRAY_SIZE(arr), mod, fn, data))
98562+ if (each_symbol_in_section(modarr, ARRAY_SIZE(modarr), mod, fn, data))
98563 return true;
98564 }
98565 return false;
98566@@ -487,7 +489,7 @@ static int percpu_modalloc(struct module *mod, struct load_info *info)
98567 if (!pcpusec->sh_size)
98568 return 0;
98569
98570- if (align > PAGE_SIZE) {
98571+ if (align-1 >= PAGE_SIZE) {
98572 pr_warn("%s: per-cpu alignment %li > %li\n",
98573 mod->name, align, PAGE_SIZE);
98574 align = PAGE_SIZE;
98575@@ -1053,7 +1055,7 @@ struct module_attribute module_uevent =
98576 static ssize_t show_coresize(struct module_attribute *mattr,
98577 struct module_kobject *mk, char *buffer)
98578 {
98579- return sprintf(buffer, "%u\n", mk->mod->core_size);
98580+ return sprintf(buffer, "%u\n", mk->mod->core_size_rx + mk->mod->core_size_rw);
98581 }
98582
98583 static struct module_attribute modinfo_coresize =
98584@@ -1062,7 +1064,7 @@ static struct module_attribute modinfo_coresize =
98585 static ssize_t show_initsize(struct module_attribute *mattr,
98586 struct module_kobject *mk, char *buffer)
98587 {
98588- return sprintf(buffer, "%u\n", mk->mod->init_size);
98589+ return sprintf(buffer, "%u\n", mk->mod->init_size_rx + mk->mod->init_size_rw);
98590 }
98591
98592 static struct module_attribute modinfo_initsize =
98593@@ -1154,12 +1156,29 @@ static int check_version(Elf_Shdr *sechdrs,
98594 goto bad_version;
98595 }
98596
98597+#ifdef CONFIG_GRKERNSEC_RANDSTRUCT
98598+ /*
98599+ * avoid potentially printing jibberish on attempted load
98600+ * of a module randomized with a different seed
98601+ */
98602+ pr_warn("no symbol version for %s\n", symname);
98603+#else
98604 pr_warn("%s: no symbol version for %s\n", mod->name, symname);
98605+#endif
98606 return 0;
98607
98608 bad_version:
98609+#ifdef CONFIG_GRKERNSEC_RANDSTRUCT
98610+ /*
98611+ * avoid potentially printing jibberish on attempted load
98612+ * of a module randomized with a different seed
98613+ */
98614+ pr_warn("attempted module disagrees about version of symbol %s\n",
98615+ symname);
98616+#else
98617 pr_warn("%s: disagrees about version of symbol %s\n",
98618 mod->name, symname);
98619+#endif
98620 return 0;
98621 }
98622
98623@@ -1281,7 +1300,7 @@ resolve_symbol_wait(struct module *mod,
98624 */
98625 #ifdef CONFIG_SYSFS
98626
98627-#ifdef CONFIG_KALLSYMS
98628+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
98629 static inline bool sect_empty(const Elf_Shdr *sect)
98630 {
98631 return !(sect->sh_flags & SHF_ALLOC) || sect->sh_size == 0;
98632@@ -1419,7 +1438,7 @@ static void add_notes_attrs(struct module *mod, const struct load_info *info)
98633 {
98634 unsigned int notes, loaded, i;
98635 struct module_notes_attrs *notes_attrs;
98636- struct bin_attribute *nattr;
98637+ bin_attribute_no_const *nattr;
98638
98639 /* failed to create section attributes, so can't create notes */
98640 if (!mod->sect_attrs)
98641@@ -1531,7 +1550,7 @@ static void del_usage_links(struct module *mod)
98642 static int module_add_modinfo_attrs(struct module *mod)
98643 {
98644 struct module_attribute *attr;
98645- struct module_attribute *temp_attr;
98646+ module_attribute_no_const *temp_attr;
98647 int error = 0;
98648 int i;
98649
98650@@ -1741,21 +1760,21 @@ static void set_section_ro_nx(void *base,
98651
98652 static void unset_module_core_ro_nx(struct module *mod)
98653 {
98654- set_page_attributes(mod->module_core + mod->core_text_size,
98655- mod->module_core + mod->core_size,
98656+ set_page_attributes(mod->module_core_rw,
98657+ mod->module_core_rw + mod->core_size_rw,
98658 set_memory_x);
98659- set_page_attributes(mod->module_core,
98660- mod->module_core + mod->core_ro_size,
98661+ set_page_attributes(mod->module_core_rx,
98662+ mod->module_core_rx + mod->core_size_rx,
98663 set_memory_rw);
98664 }
98665
98666 static void unset_module_init_ro_nx(struct module *mod)
98667 {
98668- set_page_attributes(mod->module_init + mod->init_text_size,
98669- mod->module_init + mod->init_size,
98670+ set_page_attributes(mod->module_init_rw,
98671+ mod->module_init_rw + mod->init_size_rw,
98672 set_memory_x);
98673- set_page_attributes(mod->module_init,
98674- mod->module_init + mod->init_ro_size,
98675+ set_page_attributes(mod->module_init_rx,
98676+ mod->module_init_rx + mod->init_size_rx,
98677 set_memory_rw);
98678 }
98679
98680@@ -1768,14 +1787,14 @@ void set_all_modules_text_rw(void)
98681 list_for_each_entry_rcu(mod, &modules, list) {
98682 if (mod->state == MODULE_STATE_UNFORMED)
98683 continue;
98684- if ((mod->module_core) && (mod->core_text_size)) {
98685- set_page_attributes(mod->module_core,
98686- mod->module_core + mod->core_text_size,
98687+ if ((mod->module_core_rx) && (mod->core_size_rx)) {
98688+ set_page_attributes(mod->module_core_rx,
98689+ mod->module_core_rx + mod->core_size_rx,
98690 set_memory_rw);
98691 }
98692- if ((mod->module_init) && (mod->init_text_size)) {
98693- set_page_attributes(mod->module_init,
98694- mod->module_init + mod->init_text_size,
98695+ if ((mod->module_init_rx) && (mod->init_size_rx)) {
98696+ set_page_attributes(mod->module_init_rx,
98697+ mod->module_init_rx + mod->init_size_rx,
98698 set_memory_rw);
98699 }
98700 }
98701@@ -1791,14 +1810,14 @@ void set_all_modules_text_ro(void)
98702 list_for_each_entry_rcu(mod, &modules, list) {
98703 if (mod->state == MODULE_STATE_UNFORMED)
98704 continue;
98705- if ((mod->module_core) && (mod->core_text_size)) {
98706- set_page_attributes(mod->module_core,
98707- mod->module_core + mod->core_text_size,
98708+ if ((mod->module_core_rx) && (mod->core_size_rx)) {
98709+ set_page_attributes(mod->module_core_rx,
98710+ mod->module_core_rx + mod->core_size_rx,
98711 set_memory_ro);
98712 }
98713- if ((mod->module_init) && (mod->init_text_size)) {
98714- set_page_attributes(mod->module_init,
98715- mod->module_init + mod->init_text_size,
98716+ if ((mod->module_init_rx) && (mod->init_size_rx)) {
98717+ set_page_attributes(mod->module_init_rx,
98718+ mod->module_init_rx + mod->init_size_rx,
98719 set_memory_ro);
98720 }
98721 }
98722@@ -1807,7 +1826,15 @@ void set_all_modules_text_ro(void)
98723 #else
98724 static inline void set_section_ro_nx(void *base, unsigned long text_size, unsigned long ro_size, unsigned long total_size) { }
98725 static void unset_module_core_ro_nx(struct module *mod) { }
98726-static void unset_module_init_ro_nx(struct module *mod) { }
98727+static void unset_module_init_ro_nx(struct module *mod)
98728+{
98729+
98730+#ifdef CONFIG_PAX_KERNEXEC
98731+ set_memory_nx((unsigned long)mod->module_init_rx, PFN_UP(mod->init_size_rx));
98732+ set_memory_rw((unsigned long)mod->module_init_rx, PFN_UP(mod->init_size_rx));
98733+#endif
98734+
98735+}
98736 #endif
98737
98738 void __weak module_memfree(void *module_region)
98739@@ -1861,16 +1888,19 @@ static void free_module(struct module *mod)
98740 /* This may be NULL, but that's OK */
98741 unset_module_init_ro_nx(mod);
98742 module_arch_freeing_init(mod);
98743- module_memfree(mod->module_init);
98744+ module_memfree(mod->module_init_rw);
98745+ module_memfree_exec(mod->module_init_rx);
98746 kfree(mod->args);
98747 percpu_modfree(mod);
98748
98749 /* Free lock-classes; relies on the preceding sync_rcu(). */
98750- lockdep_free_key_range(mod->module_core, mod->core_size);
98751+ lockdep_free_key_range(mod->module_core_rx, mod->core_size_rx);
98752+ lockdep_free_key_range(mod->module_core_rw, mod->core_size_rw);
98753
98754 /* Finally, free the core (containing the module structure) */
98755 unset_module_core_ro_nx(mod);
98756- module_memfree(mod->module_core);
98757+ module_memfree_exec(mod->module_core_rx);
98758+ module_memfree(mod->module_core_rw);
98759
98760 #ifdef CONFIG_MPU
98761 update_protections(current->mm);
98762@@ -1939,9 +1969,31 @@ static int simplify_symbols(struct module *mod, const struct load_info *info)
98763 int ret = 0;
98764 const struct kernel_symbol *ksym;
98765
98766+#ifdef CONFIG_GRKERNSEC_MODHARDEN
98767+ int is_fs_load = 0;
98768+ int register_filesystem_found = 0;
98769+ char *p;
98770+
98771+ p = strstr(mod->args, "grsec_modharden_fs");
98772+ if (p) {
98773+ char *endptr = p + sizeof("grsec_modharden_fs") - 1;
98774+ /* copy \0 as well */
98775+ memmove(p, endptr, strlen(mod->args) - (unsigned int)(endptr - mod->args) + 1);
98776+ is_fs_load = 1;
98777+ }
98778+#endif
98779+
98780 for (i = 1; i < symsec->sh_size / sizeof(Elf_Sym); i++) {
98781 const char *name = info->strtab + sym[i].st_name;
98782
98783+#ifdef CONFIG_GRKERNSEC_MODHARDEN
98784+ /* it's a real shame this will never get ripped and copied
98785+ upstream! ;(
98786+ */
98787+ if (is_fs_load && !strcmp(name, "register_filesystem"))
98788+ register_filesystem_found = 1;
98789+#endif
98790+
98791 switch (sym[i].st_shndx) {
98792 case SHN_COMMON:
98793 /* Ignore common symbols */
98794@@ -1966,7 +2018,9 @@ static int simplify_symbols(struct module *mod, const struct load_info *info)
98795 ksym = resolve_symbol_wait(mod, info, name);
98796 /* Ok if resolved. */
98797 if (ksym && !IS_ERR(ksym)) {
98798+ pax_open_kernel();
98799 sym[i].st_value = ksym->value;
98800+ pax_close_kernel();
98801 break;
98802 }
98803
98804@@ -1985,11 +2039,20 @@ static int simplify_symbols(struct module *mod, const struct load_info *info)
98805 secbase = (unsigned long)mod_percpu(mod);
98806 else
98807 secbase = info->sechdrs[sym[i].st_shndx].sh_addr;
98808+ pax_open_kernel();
98809 sym[i].st_value += secbase;
98810+ pax_close_kernel();
98811 break;
98812 }
98813 }
98814
98815+#ifdef CONFIG_GRKERNSEC_MODHARDEN
98816+ if (is_fs_load && !register_filesystem_found) {
98817+ printk(KERN_ALERT "grsec: Denied attempt to load non-fs module %.64s through mount\n", mod->name);
98818+ ret = -EPERM;
98819+ }
98820+#endif
98821+
98822 return ret;
98823 }
98824
98825@@ -2073,22 +2136,12 @@ static void layout_sections(struct module *mod, struct load_info *info)
98826 || s->sh_entsize != ~0UL
98827 || strstarts(sname, ".init"))
98828 continue;
98829- s->sh_entsize = get_offset(mod, &mod->core_size, s, i);
98830+ if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
98831+ s->sh_entsize = get_offset(mod, &mod->core_size_rw, s, i);
98832+ else
98833+ s->sh_entsize = get_offset(mod, &mod->core_size_rx, s, i);
98834 pr_debug("\t%s\n", sname);
98835 }
98836- switch (m) {
98837- case 0: /* executable */
98838- mod->core_size = debug_align(mod->core_size);
98839- mod->core_text_size = mod->core_size;
98840- break;
98841- case 1: /* RO: text and ro-data */
98842- mod->core_size = debug_align(mod->core_size);
98843- mod->core_ro_size = mod->core_size;
98844- break;
98845- case 3: /* whole core */
98846- mod->core_size = debug_align(mod->core_size);
98847- break;
98848- }
98849 }
98850
98851 pr_debug("Init section allocation order:\n");
98852@@ -2102,23 +2155,13 @@ static void layout_sections(struct module *mod, struct load_info *info)
98853 || s->sh_entsize != ~0UL
98854 || !strstarts(sname, ".init"))
98855 continue;
98856- s->sh_entsize = (get_offset(mod, &mod->init_size, s, i)
98857- | INIT_OFFSET_MASK);
98858+ if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
98859+ s->sh_entsize = get_offset(mod, &mod->init_size_rw, s, i);
98860+ else
98861+ s->sh_entsize = get_offset(mod, &mod->init_size_rx, s, i);
98862+ s->sh_entsize |= INIT_OFFSET_MASK;
98863 pr_debug("\t%s\n", sname);
98864 }
98865- switch (m) {
98866- case 0: /* executable */
98867- mod->init_size = debug_align(mod->init_size);
98868- mod->init_text_size = mod->init_size;
98869- break;
98870- case 1: /* RO: text and ro-data */
98871- mod->init_size = debug_align(mod->init_size);
98872- mod->init_ro_size = mod->init_size;
98873- break;
98874- case 3: /* whole init */
98875- mod->init_size = debug_align(mod->init_size);
98876- break;
98877- }
98878 }
98879 }
98880
98881@@ -2291,7 +2334,7 @@ static void layout_symtab(struct module *mod, struct load_info *info)
98882
98883 /* Put symbol section at end of init part of module. */
98884 symsect->sh_flags |= SHF_ALLOC;
98885- symsect->sh_entsize = get_offset(mod, &mod->init_size, symsect,
98886+ symsect->sh_entsize = get_offset(mod, &mod->init_size_rx, symsect,
98887 info->index.sym) | INIT_OFFSET_MASK;
98888 pr_debug("\t%s\n", info->secstrings + symsect->sh_name);
98889
98890@@ -2308,16 +2351,16 @@ static void layout_symtab(struct module *mod, struct load_info *info)
98891 }
98892
98893 /* Append room for core symbols at end of core part. */
98894- info->symoffs = ALIGN(mod->core_size, symsect->sh_addralign ?: 1);
98895- info->stroffs = mod->core_size = info->symoffs + ndst * sizeof(Elf_Sym);
98896- mod->core_size += strtab_size;
98897- mod->core_size = debug_align(mod->core_size);
98898+ info->symoffs = ALIGN(mod->core_size_rx, symsect->sh_addralign ?: 1);
98899+ info->stroffs = mod->core_size_rx = info->symoffs + ndst * sizeof(Elf_Sym);
98900+ mod->core_size_rx += strtab_size;
98901+ mod->core_size_rx = debug_align(mod->core_size_rx);
98902
98903 /* Put string table section at end of init part of module. */
98904 strsect->sh_flags |= SHF_ALLOC;
98905- strsect->sh_entsize = get_offset(mod, &mod->init_size, strsect,
98906+ strsect->sh_entsize = get_offset(mod, &mod->init_size_rx, strsect,
98907 info->index.str) | INIT_OFFSET_MASK;
98908- mod->init_size = debug_align(mod->init_size);
98909+ mod->init_size_rx = debug_align(mod->init_size_rx);
98910 pr_debug("\t%s\n", info->secstrings + strsect->sh_name);
98911 }
98912
98913@@ -2334,12 +2377,14 @@ static void add_kallsyms(struct module *mod, const struct load_info *info)
98914 /* Make sure we get permanent strtab: don't use info->strtab. */
98915 mod->strtab = (void *)info->sechdrs[info->index.str].sh_addr;
98916
98917+ pax_open_kernel();
98918+
98919 /* Set types up while we still have access to sections. */
98920 for (i = 0; i < mod->num_symtab; i++)
98921 mod->symtab[i].st_info = elf_type(&mod->symtab[i], info);
98922
98923- mod->core_symtab = dst = mod->module_core + info->symoffs;
98924- mod->core_strtab = s = mod->module_core + info->stroffs;
98925+ mod->core_symtab = dst = mod->module_core_rx + info->symoffs;
98926+ mod->core_strtab = s = mod->module_core_rx + info->stroffs;
98927 src = mod->symtab;
98928 for (ndst = i = 0; i < mod->num_symtab; i++) {
98929 if (i == 0 ||
98930@@ -2351,6 +2396,8 @@ static void add_kallsyms(struct module *mod, const struct load_info *info)
98931 }
98932 }
98933 mod->core_num_syms = ndst;
98934+
98935+ pax_close_kernel();
98936 }
98937 #else
98938 static inline void layout_symtab(struct module *mod, struct load_info *info)
98939@@ -2384,17 +2431,33 @@ void * __weak module_alloc(unsigned long size)
98940 return vmalloc_exec(size);
98941 }
98942
98943-static void *module_alloc_update_bounds(unsigned long size)
98944+static void *module_alloc_update_bounds_rw(unsigned long size)
98945 {
98946 void *ret = module_alloc(size);
98947
98948 if (ret) {
98949 mutex_lock(&module_mutex);
98950 /* Update module bounds. */
98951- if ((unsigned long)ret < module_addr_min)
98952- module_addr_min = (unsigned long)ret;
98953- if ((unsigned long)ret + size > module_addr_max)
98954- module_addr_max = (unsigned long)ret + size;
98955+ if ((unsigned long)ret < module_addr_min_rw)
98956+ module_addr_min_rw = (unsigned long)ret;
98957+ if ((unsigned long)ret + size > module_addr_max_rw)
98958+ module_addr_max_rw = (unsigned long)ret + size;
98959+ mutex_unlock(&module_mutex);
98960+ }
98961+ return ret;
98962+}
98963+
98964+static void *module_alloc_update_bounds_rx(unsigned long size)
98965+{
98966+ void *ret = module_alloc_exec(size);
98967+
98968+ if (ret) {
98969+ mutex_lock(&module_mutex);
98970+ /* Update module bounds. */
98971+ if ((unsigned long)ret < module_addr_min_rx)
98972+ module_addr_min_rx = (unsigned long)ret;
98973+ if ((unsigned long)ret + size > module_addr_max_rx)
98974+ module_addr_max_rx = (unsigned long)ret + size;
98975 mutex_unlock(&module_mutex);
98976 }
98977 return ret;
98978@@ -2665,7 +2728,15 @@ static struct module *setup_load_info(struct load_info *info, int flags)
98979 mod = (void *)info->sechdrs[info->index.mod].sh_addr;
98980
98981 if (info->index.sym == 0) {
98982+#ifdef CONFIG_GRKERNSEC_RANDSTRUCT
98983+ /*
98984+ * avoid potentially printing jibberish on attempted load
98985+ * of a module randomized with a different seed
98986+ */
98987+ pr_warn("module has no symbols (stripped?)\n");
98988+#else
98989 pr_warn("%s: module has no symbols (stripped?)\n", mod->name);
98990+#endif
98991 return ERR_PTR(-ENOEXEC);
98992 }
98993
98994@@ -2681,8 +2752,14 @@ static struct module *setup_load_info(struct load_info *info, int flags)
98995 static int check_modinfo(struct module *mod, struct load_info *info, int flags)
98996 {
98997 const char *modmagic = get_modinfo(info, "vermagic");
98998+ const char *license = get_modinfo(info, "license");
98999 int err;
99000
99001+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
99002+ if (!license || !license_is_gpl_compatible(license))
99003+ return -ENOEXEC;
99004+#endif
99005+
99006 if (flags & MODULE_INIT_IGNORE_VERMAGIC)
99007 modmagic = NULL;
99008
99009@@ -2707,7 +2784,7 @@ static int check_modinfo(struct module *mod, struct load_info *info, int flags)
99010 }
99011
99012 /* Set up license info based on the info section */
99013- set_license(mod, get_modinfo(info, "license"));
99014+ set_license(mod, license);
99015
99016 return 0;
99017 }
99018@@ -2801,7 +2878,7 @@ static int move_module(struct module *mod, struct load_info *info)
99019 void *ptr;
99020
99021 /* Do the allocs. */
99022- ptr = module_alloc_update_bounds(mod->core_size);
99023+ ptr = module_alloc_update_bounds_rw(mod->core_size_rw);
99024 /*
99025 * The pointer to this block is stored in the module structure
99026 * which is inside the block. Just mark it as not being a
99027@@ -2811,11 +2888,11 @@ static int move_module(struct module *mod, struct load_info *info)
99028 if (!ptr)
99029 return -ENOMEM;
99030
99031- memset(ptr, 0, mod->core_size);
99032- mod->module_core = ptr;
99033+ memset(ptr, 0, mod->core_size_rw);
99034+ mod->module_core_rw = ptr;
99035
99036- if (mod->init_size) {
99037- ptr = module_alloc_update_bounds(mod->init_size);
99038+ if (mod->init_size_rw) {
99039+ ptr = module_alloc_update_bounds_rw(mod->init_size_rw);
99040 /*
99041 * The pointer to this block is stored in the module structure
99042 * which is inside the block. This block doesn't need to be
99043@@ -2824,13 +2901,45 @@ static int move_module(struct module *mod, struct load_info *info)
99044 */
99045 kmemleak_ignore(ptr);
99046 if (!ptr) {
99047- module_memfree(mod->module_core);
99048+ module_memfree(mod->module_core_rw);
99049 return -ENOMEM;
99050 }
99051- memset(ptr, 0, mod->init_size);
99052- mod->module_init = ptr;
99053+ memset(ptr, 0, mod->init_size_rw);
99054+ mod->module_init_rw = ptr;
99055 } else
99056- mod->module_init = NULL;
99057+ mod->module_init_rw = NULL;
99058+
99059+ ptr = module_alloc_update_bounds_rx(mod->core_size_rx);
99060+ kmemleak_not_leak(ptr);
99061+ if (!ptr) {
99062+ if (mod->module_init_rw)
99063+ module_memfree(mod->module_init_rw);
99064+ module_memfree(mod->module_core_rw);
99065+ return -ENOMEM;
99066+ }
99067+
99068+ pax_open_kernel();
99069+ memset(ptr, 0, mod->core_size_rx);
99070+ pax_close_kernel();
99071+ mod->module_core_rx = ptr;
99072+
99073+ if (mod->init_size_rx) {
99074+ ptr = module_alloc_update_bounds_rx(mod->init_size_rx);
99075+ kmemleak_ignore(ptr);
99076+ if (!ptr && mod->init_size_rx) {
99077+ module_memfree_exec(mod->module_core_rx);
99078+ if (mod->module_init_rw)
99079+ module_memfree(mod->module_init_rw);
99080+ module_memfree(mod->module_core_rw);
99081+ return -ENOMEM;
99082+ }
99083+
99084+ pax_open_kernel();
99085+ memset(ptr, 0, mod->init_size_rx);
99086+ pax_close_kernel();
99087+ mod->module_init_rx = ptr;
99088+ } else
99089+ mod->module_init_rx = NULL;
99090
99091 /* Transfer each section which specifies SHF_ALLOC */
99092 pr_debug("final section addresses:\n");
99093@@ -2841,16 +2950,45 @@ static int move_module(struct module *mod, struct load_info *info)
99094 if (!(shdr->sh_flags & SHF_ALLOC))
99095 continue;
99096
99097- if (shdr->sh_entsize & INIT_OFFSET_MASK)
99098- dest = mod->module_init
99099- + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
99100- else
99101- dest = mod->module_core + shdr->sh_entsize;
99102+ if (shdr->sh_entsize & INIT_OFFSET_MASK) {
99103+ if ((shdr->sh_flags & SHF_WRITE) || !(shdr->sh_flags & SHF_ALLOC))
99104+ dest = mod->module_init_rw
99105+ + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
99106+ else
99107+ dest = mod->module_init_rx
99108+ + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
99109+ } else {
99110+ if ((shdr->sh_flags & SHF_WRITE) || !(shdr->sh_flags & SHF_ALLOC))
99111+ dest = mod->module_core_rw + shdr->sh_entsize;
99112+ else
99113+ dest = mod->module_core_rx + shdr->sh_entsize;
99114+ }
99115+
99116+ if (shdr->sh_type != SHT_NOBITS) {
99117+
99118+#ifdef CONFIG_PAX_KERNEXEC
99119+#ifdef CONFIG_X86_64
99120+ if ((shdr->sh_flags & SHF_WRITE) && (shdr->sh_flags & SHF_EXECINSTR))
99121+ set_memory_x((unsigned long)dest, (shdr->sh_size + PAGE_SIZE) >> PAGE_SHIFT);
99122+#endif
99123+ if (!(shdr->sh_flags & SHF_WRITE) && (shdr->sh_flags & SHF_ALLOC)) {
99124+ pax_open_kernel();
99125+ memcpy(dest, (void *)shdr->sh_addr, shdr->sh_size);
99126+ pax_close_kernel();
99127+ } else
99128+#endif
99129
99130- if (shdr->sh_type != SHT_NOBITS)
99131 memcpy(dest, (void *)shdr->sh_addr, shdr->sh_size);
99132+ }
99133 /* Update sh_addr to point to copy in image. */
99134- shdr->sh_addr = (unsigned long)dest;
99135+
99136+#ifdef CONFIG_PAX_KERNEXEC
99137+ if (shdr->sh_flags & SHF_EXECINSTR)
99138+ shdr->sh_addr = ktva_ktla((unsigned long)dest);
99139+ else
99140+#endif
99141+
99142+ shdr->sh_addr = (unsigned long)dest;
99143 pr_debug("\t0x%lx %s\n",
99144 (long)shdr->sh_addr, info->secstrings + shdr->sh_name);
99145 }
99146@@ -2907,12 +3045,12 @@ static void flush_module_icache(const struct module *mod)
99147 * Do it before processing of module parameters, so the module
99148 * can provide parameter accessor functions of its own.
99149 */
99150- if (mod->module_init)
99151- flush_icache_range((unsigned long)mod->module_init,
99152- (unsigned long)mod->module_init
99153- + mod->init_size);
99154- flush_icache_range((unsigned long)mod->module_core,
99155- (unsigned long)mod->module_core + mod->core_size);
99156+ if (mod->module_init_rx)
99157+ flush_icache_range((unsigned long)mod->module_init_rx,
99158+ (unsigned long)mod->module_init_rx
99159+ + mod->init_size_rx);
99160+ flush_icache_range((unsigned long)mod->module_core_rx,
99161+ (unsigned long)mod->module_core_rx + mod->core_size_rx);
99162
99163 set_fs(old_fs);
99164 }
99165@@ -2970,8 +3108,10 @@ static void module_deallocate(struct module *mod, struct load_info *info)
99166 {
99167 percpu_modfree(mod);
99168 module_arch_freeing_init(mod);
99169- module_memfree(mod->module_init);
99170- module_memfree(mod->module_core);
99171+ module_memfree_exec(mod->module_init_rx);
99172+ module_memfree_exec(mod->module_core_rx);
99173+ module_memfree(mod->module_init_rw);
99174+ module_memfree(mod->module_core_rw);
99175 }
99176
99177 int __weak module_finalize(const Elf_Ehdr *hdr,
99178@@ -2984,7 +3124,9 @@ int __weak module_finalize(const Elf_Ehdr *hdr,
99179 static int post_relocation(struct module *mod, const struct load_info *info)
99180 {
99181 /* Sort exception table now relocations are done. */
99182+ pax_open_kernel();
99183 sort_extable(mod->extable, mod->extable + mod->num_exentries);
99184+ pax_close_kernel();
99185
99186 /* Copy relocated percpu area over. */
99187 percpu_modcopy(mod, (void *)info->sechdrs[info->index.pcpu].sh_addr,
99188@@ -3032,13 +3174,15 @@ static void do_mod_ctors(struct module *mod)
99189 /* For freeing module_init on success, in case kallsyms traversing */
99190 struct mod_initfree {
99191 struct rcu_head rcu;
99192- void *module_init;
99193+ void *module_init_rw;
99194+ void *module_init_rx;
99195 };
99196
99197 static void do_free_init(struct rcu_head *head)
99198 {
99199 struct mod_initfree *m = container_of(head, struct mod_initfree, rcu);
99200- module_memfree(m->module_init);
99201+ module_memfree(m->module_init_rw);
99202+ module_memfree_exec(m->module_init_rx);
99203 kfree(m);
99204 }
99205
99206@@ -3058,7 +3202,8 @@ static noinline int do_init_module(struct module *mod)
99207 ret = -ENOMEM;
99208 goto fail;
99209 }
99210- freeinit->module_init = mod->module_init;
99211+ freeinit->module_init_rw = mod->module_init_rw;
99212+ freeinit->module_init_rx = mod->module_init_rx;
99213
99214 /*
99215 * We want to find out whether @mod uses async during init. Clear
99216@@ -3117,10 +3262,10 @@ static noinline int do_init_module(struct module *mod)
99217 #endif
99218 unset_module_init_ro_nx(mod);
99219 module_arch_freeing_init(mod);
99220- mod->module_init = NULL;
99221- mod->init_size = 0;
99222- mod->init_ro_size = 0;
99223- mod->init_text_size = 0;
99224+ mod->module_init_rw = NULL;
99225+ mod->module_init_rx = NULL;
99226+ mod->init_size_rw = 0;
99227+ mod->init_size_rx = 0;
99228 /*
99229 * We want to free module_init, but be aware that kallsyms may be
99230 * walking this with preempt disabled. In all the failure paths,
99231@@ -3208,16 +3353,16 @@ static int complete_formation(struct module *mod, struct load_info *info)
99232 module_bug_finalize(info->hdr, info->sechdrs, mod);
99233
99234 /* Set RO and NX regions for core */
99235- set_section_ro_nx(mod->module_core,
99236- mod->core_text_size,
99237- mod->core_ro_size,
99238- mod->core_size);
99239+ set_section_ro_nx(mod->module_core_rx,
99240+ mod->core_size_rx,
99241+ mod->core_size_rx,
99242+ mod->core_size_rx);
99243
99244 /* Set RO and NX regions for init */
99245- set_section_ro_nx(mod->module_init,
99246- mod->init_text_size,
99247- mod->init_ro_size,
99248- mod->init_size);
99249+ set_section_ro_nx(mod->module_init_rx,
99250+ mod->init_size_rx,
99251+ mod->init_size_rx,
99252+ mod->init_size_rx);
99253
99254 /* Mark state as coming so strong_try_module_get() ignores us,
99255 * but kallsyms etc. can see us. */
99256@@ -3301,9 +3446,38 @@ static int load_module(struct load_info *info, const char __user *uargs,
99257 if (err)
99258 goto free_unload;
99259
99260+ /* Now copy in args */
99261+ mod->args = strndup_user(uargs, ~0UL >> 1);
99262+ if (IS_ERR(mod->args)) {
99263+ err = PTR_ERR(mod->args);
99264+ goto free_unload;
99265+ }
99266+
99267 /* Set up MODINFO_ATTR fields */
99268 setup_modinfo(mod, info);
99269
99270+#ifdef CONFIG_GRKERNSEC_MODHARDEN
99271+ {
99272+ char *p, *p2;
99273+
99274+ if (strstr(mod->args, "grsec_modharden_netdev")) {
99275+ printk(KERN_ALERT "grsec: denied auto-loading kernel module for a network device with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%.64s instead.", mod->name);
99276+ err = -EPERM;
99277+ goto free_modinfo;
99278+ } else if ((p = strstr(mod->args, "grsec_modharden_normal"))) {
99279+ p += sizeof("grsec_modharden_normal") - 1;
99280+ p2 = strstr(p, "_");
99281+ if (p2) {
99282+ *p2 = '\0';
99283+ printk(KERN_ALERT "grsec: denied kernel module auto-load of %.64s by uid %.9s\n", mod->name, p);
99284+ *p2 = '_';
99285+ }
99286+ err = -EPERM;
99287+ goto free_modinfo;
99288+ }
99289+ }
99290+#endif
99291+
99292 /* Fix up syms, so that st_value is a pointer to location. */
99293 err = simplify_symbols(mod, info);
99294 if (err < 0)
99295@@ -3319,13 +3493,6 @@ static int load_module(struct load_info *info, const char __user *uargs,
99296
99297 flush_module_icache(mod);
99298
99299- /* Now copy in args */
99300- mod->args = strndup_user(uargs, ~0UL >> 1);
99301- if (IS_ERR(mod->args)) {
99302- err = PTR_ERR(mod->args);
99303- goto free_arch_cleanup;
99304- }
99305-
99306 dynamic_debug_setup(info->debug, info->num_debug);
99307
99308 /* Ftrace init must be called in the MODULE_STATE_UNFORMED state */
99309@@ -3373,11 +3540,10 @@ static int load_module(struct load_info *info, const char __user *uargs,
99310 ddebug_cleanup:
99311 dynamic_debug_remove(info->debug);
99312 synchronize_sched();
99313- kfree(mod->args);
99314- free_arch_cleanup:
99315 module_arch_cleanup(mod);
99316 free_modinfo:
99317 free_modinfo(mod);
99318+ kfree(mod->args);
99319 free_unload:
99320 module_unload_free(mod);
99321 unlink_mod:
99322@@ -3390,7 +3556,8 @@ static int load_module(struct load_info *info, const char __user *uargs,
99323 mutex_unlock(&module_mutex);
99324 free_module:
99325 /* Free lock-classes; relies on the preceding sync_rcu() */
99326- lockdep_free_key_range(mod->module_core, mod->core_size);
99327+ lockdep_free_key_range(mod->module_core_rx, mod->core_size_rx);
99328+ lockdep_free_key_range(mod->module_core_rw, mod->core_size_rw);
99329
99330 module_deallocate(mod, info);
99331 free_copy:
99332@@ -3467,10 +3634,16 @@ static const char *get_ksymbol(struct module *mod,
99333 unsigned long nextval;
99334
99335 /* At worse, next value is at end of module */
99336- if (within_module_init(addr, mod))
99337- nextval = (unsigned long)mod->module_init+mod->init_text_size;
99338+ if (within_module_init_rx(addr, mod))
99339+ nextval = (unsigned long)mod->module_init_rx+mod->init_size_rx;
99340+ else if (within_module_init_rw(addr, mod))
99341+ nextval = (unsigned long)mod->module_init_rw+mod->init_size_rw;
99342+ else if (within_module_core_rx(addr, mod))
99343+ nextval = (unsigned long)mod->module_core_rx+mod->core_size_rx;
99344+ else if (within_module_core_rw(addr, mod))
99345+ nextval = (unsigned long)mod->module_core_rw+mod->core_size_rw;
99346 else
99347- nextval = (unsigned long)mod->module_core+mod->core_text_size;
99348+ return NULL;
99349
99350 /* Scan for closest preceding symbol, and next symbol. (ELF
99351 starts real symbols at 1). */
99352@@ -3718,7 +3891,7 @@ static int m_show(struct seq_file *m, void *p)
99353 return 0;
99354
99355 seq_printf(m, "%s %u",
99356- mod->name, mod->init_size + mod->core_size);
99357+ mod->name, mod->init_size_rx + mod->init_size_rw + mod->core_size_rx + mod->core_size_rw);
99358 print_unload_info(m, mod);
99359
99360 /* Informative for users. */
99361@@ -3727,7 +3900,7 @@ static int m_show(struct seq_file *m, void *p)
99362 mod->state == MODULE_STATE_COMING ? "Loading" :
99363 "Live");
99364 /* Used by oprofile and other similar tools. */
99365- seq_printf(m, " 0x%pK", mod->module_core);
99366+ seq_printf(m, " 0x%pK 0x%pK", mod->module_core_rx, mod->module_core_rw);
99367
99368 /* Taints info */
99369 if (mod->taints)
99370@@ -3763,7 +3936,17 @@ static const struct file_operations proc_modules_operations = {
99371
99372 static int __init proc_modules_init(void)
99373 {
99374+#ifndef CONFIG_GRKERNSEC_HIDESYM
99375+#ifdef CONFIG_GRKERNSEC_PROC_USER
99376+ proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
99377+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
99378+ proc_create("modules", S_IRUSR | S_IRGRP, NULL, &proc_modules_operations);
99379+#else
99380 proc_create("modules", 0, NULL, &proc_modules_operations);
99381+#endif
99382+#else
99383+ proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
99384+#endif
99385 return 0;
99386 }
99387 module_init(proc_modules_init);
99388@@ -3824,7 +4007,8 @@ struct module *__module_address(unsigned long addr)
99389 {
99390 struct module *mod;
99391
99392- if (addr < module_addr_min || addr > module_addr_max)
99393+ if ((addr < module_addr_min_rx || addr > module_addr_max_rx) &&
99394+ (addr < module_addr_min_rw || addr > module_addr_max_rw))
99395 return NULL;
99396
99397 list_for_each_entry_rcu(mod, &modules, list) {
99398@@ -3865,11 +4049,20 @@ bool is_module_text_address(unsigned long addr)
99399 */
99400 struct module *__module_text_address(unsigned long addr)
99401 {
99402- struct module *mod = __module_address(addr);
99403+ struct module *mod;
99404+
99405+#ifdef CONFIG_X86_32
99406+ addr = ktla_ktva(addr);
99407+#endif
99408+
99409+ if (addr < module_addr_min_rx || addr > module_addr_max_rx)
99410+ return NULL;
99411+
99412+ mod = __module_address(addr);
99413+
99414 if (mod) {
99415 /* Make sure it's within the text section. */
99416- if (!within(addr, mod->module_init, mod->init_text_size)
99417- && !within(addr, mod->module_core, mod->core_text_size))
99418+ if (!within_module_init_rx(addr, mod) && !within_module_core_rx(addr, mod))
99419 mod = NULL;
99420 }
99421 return mod;
99422diff --git a/kernel/notifier.c b/kernel/notifier.c
99423index ae9fc7c..5085fbf 100644
99424--- a/kernel/notifier.c
99425+++ b/kernel/notifier.c
99426@@ -5,6 +5,7 @@
99427 #include <linux/rcupdate.h>
99428 #include <linux/vmalloc.h>
99429 #include <linux/reboot.h>
99430+#include <linux/mm.h>
99431
99432 /*
99433 * Notifier list for kernel code which wants to be called
99434@@ -24,10 +25,12 @@ static int notifier_chain_register(struct notifier_block **nl,
99435 while ((*nl) != NULL) {
99436 if (n->priority > (*nl)->priority)
99437 break;
99438- nl = &((*nl)->next);
99439+ nl = (struct notifier_block **)&((*nl)->next);
99440 }
99441- n->next = *nl;
99442+ pax_open_kernel();
99443+ *(const void **)&n->next = *nl;
99444 rcu_assign_pointer(*nl, n);
99445+ pax_close_kernel();
99446 return 0;
99447 }
99448
99449@@ -39,10 +42,12 @@ static int notifier_chain_cond_register(struct notifier_block **nl,
99450 return 0;
99451 if (n->priority > (*nl)->priority)
99452 break;
99453- nl = &((*nl)->next);
99454+ nl = (struct notifier_block **)&((*nl)->next);
99455 }
99456- n->next = *nl;
99457+ pax_open_kernel();
99458+ *(const void **)&n->next = *nl;
99459 rcu_assign_pointer(*nl, n);
99460+ pax_close_kernel();
99461 return 0;
99462 }
99463
99464@@ -51,10 +56,12 @@ static int notifier_chain_unregister(struct notifier_block **nl,
99465 {
99466 while ((*nl) != NULL) {
99467 if ((*nl) == n) {
99468+ pax_open_kernel();
99469 rcu_assign_pointer(*nl, n->next);
99470+ pax_close_kernel();
99471 return 0;
99472 }
99473- nl = &((*nl)->next);
99474+ nl = (struct notifier_block **)&((*nl)->next);
99475 }
99476 return -ENOENT;
99477 }
99478diff --git a/kernel/padata.c b/kernel/padata.c
99479index b38bea9..91acfbe 100644
99480--- a/kernel/padata.c
99481+++ b/kernel/padata.c
99482@@ -54,7 +54,7 @@ static int padata_cpu_hash(struct parallel_data *pd)
99483 * seq_nr mod. number of cpus in use.
99484 */
99485
99486- seq_nr = atomic_inc_return(&pd->seq_nr);
99487+ seq_nr = atomic_inc_return_unchecked(&pd->seq_nr);
99488 cpu_index = seq_nr % cpumask_weight(pd->cpumask.pcpu);
99489
99490 return padata_index_to_cpu(pd, cpu_index);
99491@@ -428,7 +428,7 @@ static struct parallel_data *padata_alloc_pd(struct padata_instance *pinst,
99492 padata_init_pqueues(pd);
99493 padata_init_squeues(pd);
99494 setup_timer(&pd->timer, padata_reorder_timer, (unsigned long)pd);
99495- atomic_set(&pd->seq_nr, -1);
99496+ atomic_set_unchecked(&pd->seq_nr, -1);
99497 atomic_set(&pd->reorder_objects, 0);
99498 atomic_set(&pd->refcnt, 0);
99499 pd->pinst = pinst;
99500diff --git a/kernel/panic.c b/kernel/panic.c
99501index 8136ad7..15c857b 100644
99502--- a/kernel/panic.c
99503+++ b/kernel/panic.c
99504@@ -54,7 +54,7 @@ EXPORT_SYMBOL(panic_blink);
99505 /*
99506 * Stop ourself in panic -- architecture code may override this
99507 */
99508-void __weak panic_smp_self_stop(void)
99509+void __weak __noreturn panic_smp_self_stop(void)
99510 {
99511 while (1)
99512 cpu_relax();
99513@@ -425,7 +425,7 @@ static void warn_slowpath_common(const char *file, int line, void *caller,
99514 disable_trace_on_warning();
99515
99516 pr_warn("------------[ cut here ]------------\n");
99517- pr_warn("WARNING: CPU: %d PID: %d at %s:%d %pS()\n",
99518+ pr_warn("WARNING: CPU: %d PID: %d at %s:%d %pA()\n",
99519 raw_smp_processor_id(), current->pid, file, line, caller);
99520
99521 if (args)
99522@@ -490,7 +490,8 @@ EXPORT_SYMBOL(warn_slowpath_null);
99523 */
99524 __visible void __stack_chk_fail(void)
99525 {
99526- panic("stack-protector: Kernel stack is corrupted in: %p\n",
99527+ dump_stack();
99528+ panic("stack-protector: Kernel stack is corrupted in: %pA\n",
99529 __builtin_return_address(0));
99530 }
99531 EXPORT_SYMBOL(__stack_chk_fail);
99532diff --git a/kernel/pid.c b/kernel/pid.c
99533index cd36a5e..11f185d 100644
99534--- a/kernel/pid.c
99535+++ b/kernel/pid.c
99536@@ -33,6 +33,7 @@
99537 #include <linux/rculist.h>
99538 #include <linux/bootmem.h>
99539 #include <linux/hash.h>
99540+#include <linux/security.h>
99541 #include <linux/pid_namespace.h>
99542 #include <linux/init_task.h>
99543 #include <linux/syscalls.h>
99544@@ -47,7 +48,7 @@ struct pid init_struct_pid = INIT_STRUCT_PID;
99545
99546 int pid_max = PID_MAX_DEFAULT;
99547
99548-#define RESERVED_PIDS 300
99549+#define RESERVED_PIDS 500
99550
99551 int pid_max_min = RESERVED_PIDS + 1;
99552 int pid_max_max = PID_MAX_LIMIT;
99553@@ -450,10 +451,18 @@ EXPORT_SYMBOL(pid_task);
99554 */
99555 struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns)
99556 {
99557+ struct task_struct *task;
99558+
99559 rcu_lockdep_assert(rcu_read_lock_held(),
99560 "find_task_by_pid_ns() needs rcu_read_lock()"
99561 " protection");
99562- return pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
99563+
99564+ task = pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
99565+
99566+ if (gr_pid_is_chrooted(task))
99567+ return NULL;
99568+
99569+ return task;
99570 }
99571
99572 struct task_struct *find_task_by_vpid(pid_t vnr)
99573@@ -461,6 +470,14 @@ struct task_struct *find_task_by_vpid(pid_t vnr)
99574 return find_task_by_pid_ns(vnr, task_active_pid_ns(current));
99575 }
99576
99577+struct task_struct *find_task_by_vpid_unrestricted(pid_t vnr)
99578+{
99579+ rcu_lockdep_assert(rcu_read_lock_held(),
99580+ "find_task_by_pid_ns() needs rcu_read_lock()"
99581+ " protection");
99582+ return pid_task(find_pid_ns(vnr, task_active_pid_ns(current)), PIDTYPE_PID);
99583+}
99584+
99585 struct pid *get_task_pid(struct task_struct *task, enum pid_type type)
99586 {
99587 struct pid *pid;
99588diff --git a/kernel/pid_namespace.c b/kernel/pid_namespace.c
99589index a65ba13..f600dbb 100644
99590--- a/kernel/pid_namespace.c
99591+++ b/kernel/pid_namespace.c
99592@@ -274,7 +274,7 @@ static int pid_ns_ctl_handler(struct ctl_table *table, int write,
99593 void __user *buffer, size_t *lenp, loff_t *ppos)
99594 {
99595 struct pid_namespace *pid_ns = task_active_pid_ns(current);
99596- struct ctl_table tmp = *table;
99597+ ctl_table_no_const tmp = *table;
99598
99599 if (write && !ns_capable(pid_ns->user_ns, CAP_SYS_ADMIN))
99600 return -EPERM;
99601diff --git a/kernel/power/Kconfig b/kernel/power/Kconfig
99602index 7e01f78..f5da19d 100644
99603--- a/kernel/power/Kconfig
99604+++ b/kernel/power/Kconfig
99605@@ -24,6 +24,8 @@ config HIBERNATE_CALLBACKS
99606 config HIBERNATION
99607 bool "Hibernation (aka 'suspend to disk')"
99608 depends on SWAP && ARCH_HIBERNATION_POSSIBLE
99609+ depends on !GRKERNSEC_KMEM
99610+ depends on !PAX_MEMORY_SANITIZE
99611 select HIBERNATE_CALLBACKS
99612 select LZO_COMPRESS
99613 select LZO_DECOMPRESS
99614diff --git a/kernel/power/process.c b/kernel/power/process.c
99615index 564f786..361a18e 100644
99616--- a/kernel/power/process.c
99617+++ b/kernel/power/process.c
99618@@ -35,6 +35,7 @@ static int try_to_freeze_tasks(bool user_only)
99619 unsigned int elapsed_msecs;
99620 bool wakeup = false;
99621 int sleep_usecs = USEC_PER_MSEC;
99622+ bool timedout = false;
99623
99624 do_gettimeofday(&start);
99625
99626@@ -45,13 +46,20 @@ static int try_to_freeze_tasks(bool user_only)
99627
99628 while (true) {
99629 todo = 0;
99630+ if (time_after(jiffies, end_time))
99631+ timedout = true;
99632 read_lock(&tasklist_lock);
99633 for_each_process_thread(g, p) {
99634 if (p == current || !freeze_task(p))
99635 continue;
99636
99637- if (!freezer_should_skip(p))
99638+ if (!freezer_should_skip(p)) {
99639 todo++;
99640+ if (timedout) {
99641+ printk(KERN_ERR "Task refusing to freeze:\n");
99642+ sched_show_task(p);
99643+ }
99644+ }
99645 }
99646 read_unlock(&tasklist_lock);
99647
99648@@ -60,7 +68,7 @@ static int try_to_freeze_tasks(bool user_only)
99649 todo += wq_busy;
99650 }
99651
99652- if (!todo || time_after(jiffies, end_time))
99653+ if (!todo || timedout)
99654 break;
99655
99656 if (pm_wakeup_pending()) {
99657diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c
99658index bb0635b..9aff9f3 100644
99659--- a/kernel/printk/printk.c
99660+++ b/kernel/printk/printk.c
99661@@ -486,6 +486,11 @@ int check_syslog_permissions(int type, bool from_file)
99662 if (from_file && type != SYSLOG_ACTION_OPEN)
99663 return 0;
99664
99665+#ifdef CONFIG_GRKERNSEC_DMESG
99666+ if (grsec_enable_dmesg && !capable(CAP_SYSLOG) && !capable_nolog(CAP_SYS_ADMIN))
99667+ return -EPERM;
99668+#endif
99669+
99670 if (syslog_action_restricted(type)) {
99671 if (capable(CAP_SYSLOG))
99672 return 0;
99673diff --git a/kernel/profile.c b/kernel/profile.c
99674index a7bcd28..5b368fa 100644
99675--- a/kernel/profile.c
99676+++ b/kernel/profile.c
99677@@ -37,7 +37,7 @@ struct profile_hit {
99678 #define NR_PROFILE_HIT (PAGE_SIZE/sizeof(struct profile_hit))
99679 #define NR_PROFILE_GRP (NR_PROFILE_HIT/PROFILE_GRPSZ)
99680
99681-static atomic_t *prof_buffer;
99682+static atomic_unchecked_t *prof_buffer;
99683 static unsigned long prof_len, prof_shift;
99684
99685 int prof_on __read_mostly;
99686@@ -256,7 +256,7 @@ static void profile_flip_buffers(void)
99687 hits[i].pc = 0;
99688 continue;
99689 }
99690- atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
99691+ atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
99692 hits[i].hits = hits[i].pc = 0;
99693 }
99694 }
99695@@ -317,9 +317,9 @@ static void do_profile_hits(int type, void *__pc, unsigned int nr_hits)
99696 * Add the current hit(s) and flush the write-queue out
99697 * to the global buffer:
99698 */
99699- atomic_add(nr_hits, &prof_buffer[pc]);
99700+ atomic_add_unchecked(nr_hits, &prof_buffer[pc]);
99701 for (i = 0; i < NR_PROFILE_HIT; ++i) {
99702- atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
99703+ atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
99704 hits[i].pc = hits[i].hits = 0;
99705 }
99706 out:
99707@@ -394,7 +394,7 @@ static void do_profile_hits(int type, void *__pc, unsigned int nr_hits)
99708 {
99709 unsigned long pc;
99710 pc = ((unsigned long)__pc - (unsigned long)_stext) >> prof_shift;
99711- atomic_add(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
99712+ atomic_add_unchecked(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
99713 }
99714 #endif /* !CONFIG_SMP */
99715
99716@@ -489,7 +489,7 @@ read_profile(struct file *file, char __user *buf, size_t count, loff_t *ppos)
99717 return -EFAULT;
99718 buf++; p++; count--; read++;
99719 }
99720- pnt = (char *)prof_buffer + p - sizeof(atomic_t);
99721+ pnt = (char *)prof_buffer + p - sizeof(atomic_unchecked_t);
99722 if (copy_to_user(buf, (void *)pnt, count))
99723 return -EFAULT;
99724 read += count;
99725@@ -520,7 +520,7 @@ static ssize_t write_profile(struct file *file, const char __user *buf,
99726 }
99727 #endif
99728 profile_discard_flip_buffers();
99729- memset(prof_buffer, 0, prof_len * sizeof(atomic_t));
99730+ memset(prof_buffer, 0, prof_len * sizeof(atomic_unchecked_t));
99731 return count;
99732 }
99733
99734diff --git a/kernel/ptrace.c b/kernel/ptrace.c
99735index 9a34bd8..38d90e5 100644
99736--- a/kernel/ptrace.c
99737+++ b/kernel/ptrace.c
99738@@ -321,7 +321,7 @@ static int ptrace_attach(struct task_struct *task, long request,
99739 if (seize)
99740 flags |= PT_SEIZED;
99741 rcu_read_lock();
99742- if (ns_capable(__task_cred(task)->user_ns, CAP_SYS_PTRACE))
99743+ if (ns_capable_nolog(__task_cred(task)->user_ns, CAP_SYS_PTRACE))
99744 flags |= PT_PTRACE_CAP;
99745 rcu_read_unlock();
99746 task->ptrace = flags;
99747@@ -515,7 +515,7 @@ int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst
99748 break;
99749 return -EIO;
99750 }
99751- if (copy_to_user(dst, buf, retval))
99752+ if (retval > sizeof(buf) || copy_to_user(dst, buf, retval))
99753 return -EFAULT;
99754 copied += retval;
99755 src += retval;
99756@@ -803,7 +803,7 @@ int ptrace_request(struct task_struct *child, long request,
99757 bool seized = child->ptrace & PT_SEIZED;
99758 int ret = -EIO;
99759 siginfo_t siginfo, *si;
99760- void __user *datavp = (void __user *) data;
99761+ void __user *datavp = (__force void __user *) data;
99762 unsigned long __user *datalp = datavp;
99763 unsigned long flags;
99764
99765@@ -1049,14 +1049,21 @@ SYSCALL_DEFINE4(ptrace, long, request, long, pid, unsigned long, addr,
99766 goto out;
99767 }
99768
99769+ if (gr_handle_ptrace(child, request)) {
99770+ ret = -EPERM;
99771+ goto out_put_task_struct;
99772+ }
99773+
99774 if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
99775 ret = ptrace_attach(child, request, addr, data);
99776 /*
99777 * Some architectures need to do book-keeping after
99778 * a ptrace attach.
99779 */
99780- if (!ret)
99781+ if (!ret) {
99782 arch_ptrace_attach(child);
99783+ gr_audit_ptrace(child);
99784+ }
99785 goto out_put_task_struct;
99786 }
99787
99788@@ -1084,7 +1091,7 @@ int generic_ptrace_peekdata(struct task_struct *tsk, unsigned long addr,
99789 copied = access_process_vm(tsk, addr, &tmp, sizeof(tmp), 0);
99790 if (copied != sizeof(tmp))
99791 return -EIO;
99792- return put_user(tmp, (unsigned long __user *)data);
99793+ return put_user(tmp, (__force unsigned long __user *)data);
99794 }
99795
99796 int generic_ptrace_pokedata(struct task_struct *tsk, unsigned long addr,
99797@@ -1177,7 +1184,7 @@ int compat_ptrace_request(struct task_struct *child, compat_long_t request,
99798 }
99799
99800 COMPAT_SYSCALL_DEFINE4(ptrace, compat_long_t, request, compat_long_t, pid,
99801- compat_long_t, addr, compat_long_t, data)
99802+ compat_ulong_t, addr, compat_ulong_t, data)
99803 {
99804 struct task_struct *child;
99805 long ret;
99806@@ -1193,14 +1200,21 @@ COMPAT_SYSCALL_DEFINE4(ptrace, compat_long_t, request, compat_long_t, pid,
99807 goto out;
99808 }
99809
99810+ if (gr_handle_ptrace(child, request)) {
99811+ ret = -EPERM;
99812+ goto out_put_task_struct;
99813+ }
99814+
99815 if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
99816 ret = ptrace_attach(child, request, addr, data);
99817 /*
99818 * Some architectures need to do book-keeping after
99819 * a ptrace attach.
99820 */
99821- if (!ret)
99822+ if (!ret) {
99823 arch_ptrace_attach(child);
99824+ gr_audit_ptrace(child);
99825+ }
99826 goto out_put_task_struct;
99827 }
99828
99829diff --git a/kernel/rcu/rcutorture.c b/kernel/rcu/rcutorture.c
99830index 30d42aa..cac5d66 100644
99831--- a/kernel/rcu/rcutorture.c
99832+++ b/kernel/rcu/rcutorture.c
99833@@ -134,12 +134,12 @@ static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1],
99834 rcu_torture_count) = { 0 };
99835 static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1],
99836 rcu_torture_batch) = { 0 };
99837-static atomic_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
99838-static atomic_t n_rcu_torture_alloc;
99839-static atomic_t n_rcu_torture_alloc_fail;
99840-static atomic_t n_rcu_torture_free;
99841-static atomic_t n_rcu_torture_mberror;
99842-static atomic_t n_rcu_torture_error;
99843+static atomic_unchecked_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
99844+static atomic_unchecked_t n_rcu_torture_alloc;
99845+static atomic_unchecked_t n_rcu_torture_alloc_fail;
99846+static atomic_unchecked_t n_rcu_torture_free;
99847+static atomic_unchecked_t n_rcu_torture_mberror;
99848+static atomic_unchecked_t n_rcu_torture_error;
99849 static long n_rcu_torture_barrier_error;
99850 static long n_rcu_torture_boost_ktrerror;
99851 static long n_rcu_torture_boost_rterror;
99852@@ -148,7 +148,7 @@ static long n_rcu_torture_boosts;
99853 static long n_rcu_torture_timers;
99854 static long n_barrier_attempts;
99855 static long n_barrier_successes;
99856-static atomic_long_t n_cbfloods;
99857+static atomic_long_unchecked_t n_cbfloods;
99858 static struct list_head rcu_torture_removed;
99859
99860 static int rcu_torture_writer_state;
99861@@ -211,11 +211,11 @@ rcu_torture_alloc(void)
99862
99863 spin_lock_bh(&rcu_torture_lock);
99864 if (list_empty(&rcu_torture_freelist)) {
99865- atomic_inc(&n_rcu_torture_alloc_fail);
99866+ atomic_inc_unchecked(&n_rcu_torture_alloc_fail);
99867 spin_unlock_bh(&rcu_torture_lock);
99868 return NULL;
99869 }
99870- atomic_inc(&n_rcu_torture_alloc);
99871+ atomic_inc_unchecked(&n_rcu_torture_alloc);
99872 p = rcu_torture_freelist.next;
99873 list_del_init(p);
99874 spin_unlock_bh(&rcu_torture_lock);
99875@@ -228,7 +228,7 @@ rcu_torture_alloc(void)
99876 static void
99877 rcu_torture_free(struct rcu_torture *p)
99878 {
99879- atomic_inc(&n_rcu_torture_free);
99880+ atomic_inc_unchecked(&n_rcu_torture_free);
99881 spin_lock_bh(&rcu_torture_lock);
99882 list_add_tail(&p->rtort_free, &rcu_torture_freelist);
99883 spin_unlock_bh(&rcu_torture_lock);
99884@@ -308,7 +308,7 @@ rcu_torture_pipe_update_one(struct rcu_torture *rp)
99885 i = rp->rtort_pipe_count;
99886 if (i > RCU_TORTURE_PIPE_LEN)
99887 i = RCU_TORTURE_PIPE_LEN;
99888- atomic_inc(&rcu_torture_wcount[i]);
99889+ atomic_inc_unchecked(&rcu_torture_wcount[i]);
99890 if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
99891 rp->rtort_mbtest = 0;
99892 return true;
99893@@ -796,7 +796,7 @@ rcu_torture_cbflood(void *arg)
99894 VERBOSE_TOROUT_STRING("rcu_torture_cbflood task started");
99895 do {
99896 schedule_timeout_interruptible(cbflood_inter_holdoff);
99897- atomic_long_inc(&n_cbfloods);
99898+ atomic_long_inc_unchecked(&n_cbfloods);
99899 WARN_ON(signal_pending(current));
99900 for (i = 0; i < cbflood_n_burst; i++) {
99901 for (j = 0; j < cbflood_n_per_burst; j++) {
99902@@ -915,7 +915,7 @@ rcu_torture_writer(void *arg)
99903 i = old_rp->rtort_pipe_count;
99904 if (i > RCU_TORTURE_PIPE_LEN)
99905 i = RCU_TORTURE_PIPE_LEN;
99906- atomic_inc(&rcu_torture_wcount[i]);
99907+ atomic_inc_unchecked(&rcu_torture_wcount[i]);
99908 old_rp->rtort_pipe_count++;
99909 switch (synctype[torture_random(&rand) % nsynctypes]) {
99910 case RTWS_DEF_FREE:
99911@@ -1036,7 +1036,7 @@ static void rcu_torture_timer(unsigned long unused)
99912 return;
99913 }
99914 if (p->rtort_mbtest == 0)
99915- atomic_inc(&n_rcu_torture_mberror);
99916+ atomic_inc_unchecked(&n_rcu_torture_mberror);
99917 spin_lock(&rand_lock);
99918 cur_ops->read_delay(&rand);
99919 n_rcu_torture_timers++;
99920@@ -1111,7 +1111,7 @@ rcu_torture_reader(void *arg)
99921 continue;
99922 }
99923 if (p->rtort_mbtest == 0)
99924- atomic_inc(&n_rcu_torture_mberror);
99925+ atomic_inc_unchecked(&n_rcu_torture_mberror);
99926 cur_ops->read_delay(&rand);
99927 preempt_disable();
99928 pipe_count = p->rtort_pipe_count;
99929@@ -1180,11 +1180,11 @@ rcu_torture_stats_print(void)
99930 rcu_torture_current,
99931 rcu_torture_current_version,
99932 list_empty(&rcu_torture_freelist),
99933- atomic_read(&n_rcu_torture_alloc),
99934- atomic_read(&n_rcu_torture_alloc_fail),
99935- atomic_read(&n_rcu_torture_free));
99936+ atomic_read_unchecked(&n_rcu_torture_alloc),
99937+ atomic_read_unchecked(&n_rcu_torture_alloc_fail),
99938+ atomic_read_unchecked(&n_rcu_torture_free));
99939 pr_cont("rtmbe: %d rtbke: %ld rtbre: %ld ",
99940- atomic_read(&n_rcu_torture_mberror),
99941+ atomic_read_unchecked(&n_rcu_torture_mberror),
99942 n_rcu_torture_boost_ktrerror,
99943 n_rcu_torture_boost_rterror);
99944 pr_cont("rtbf: %ld rtb: %ld nt: %ld ",
99945@@ -1196,17 +1196,17 @@ rcu_torture_stats_print(void)
99946 n_barrier_successes,
99947 n_barrier_attempts,
99948 n_rcu_torture_barrier_error);
99949- pr_cont("cbflood: %ld\n", atomic_long_read(&n_cbfloods));
99950+ pr_cont("cbflood: %ld\n", atomic_long_read_unchecked(&n_cbfloods));
99951
99952 pr_alert("%s%s ", torture_type, TORTURE_FLAG);
99953- if (atomic_read(&n_rcu_torture_mberror) != 0 ||
99954+ if (atomic_read_unchecked(&n_rcu_torture_mberror) != 0 ||
99955 n_rcu_torture_barrier_error != 0 ||
99956 n_rcu_torture_boost_ktrerror != 0 ||
99957 n_rcu_torture_boost_rterror != 0 ||
99958 n_rcu_torture_boost_failure != 0 ||
99959 i > 1) {
99960 pr_cont("%s", "!!! ");
99961- atomic_inc(&n_rcu_torture_error);
99962+ atomic_inc_unchecked(&n_rcu_torture_error);
99963 WARN_ON_ONCE(1);
99964 }
99965 pr_cont("Reader Pipe: ");
99966@@ -1223,7 +1223,7 @@ rcu_torture_stats_print(void)
99967 pr_alert("%s%s ", torture_type, TORTURE_FLAG);
99968 pr_cont("Free-Block Circulation: ");
99969 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
99970- pr_cont(" %d", atomic_read(&rcu_torture_wcount[i]));
99971+ pr_cont(" %d", atomic_read_unchecked(&rcu_torture_wcount[i]));
99972 }
99973 pr_cont("\n");
99974
99975@@ -1570,7 +1570,7 @@ rcu_torture_cleanup(void)
99976
99977 rcu_torture_stats_print(); /* -After- the stats thread is stopped! */
99978
99979- if (atomic_read(&n_rcu_torture_error) || n_rcu_torture_barrier_error)
99980+ if (atomic_read_unchecked(&n_rcu_torture_error) || n_rcu_torture_barrier_error)
99981 rcu_torture_print_module_parms(cur_ops, "End of test: FAILURE");
99982 else if (torture_onoff_failures())
99983 rcu_torture_print_module_parms(cur_ops,
99984@@ -1695,18 +1695,18 @@ rcu_torture_init(void)
99985
99986 rcu_torture_current = NULL;
99987 rcu_torture_current_version = 0;
99988- atomic_set(&n_rcu_torture_alloc, 0);
99989- atomic_set(&n_rcu_torture_alloc_fail, 0);
99990- atomic_set(&n_rcu_torture_free, 0);
99991- atomic_set(&n_rcu_torture_mberror, 0);
99992- atomic_set(&n_rcu_torture_error, 0);
99993+ atomic_set_unchecked(&n_rcu_torture_alloc, 0);
99994+ atomic_set_unchecked(&n_rcu_torture_alloc_fail, 0);
99995+ atomic_set_unchecked(&n_rcu_torture_free, 0);
99996+ atomic_set_unchecked(&n_rcu_torture_mberror, 0);
99997+ atomic_set_unchecked(&n_rcu_torture_error, 0);
99998 n_rcu_torture_barrier_error = 0;
99999 n_rcu_torture_boost_ktrerror = 0;
100000 n_rcu_torture_boost_rterror = 0;
100001 n_rcu_torture_boost_failure = 0;
100002 n_rcu_torture_boosts = 0;
100003 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
100004- atomic_set(&rcu_torture_wcount[i], 0);
100005+ atomic_set_unchecked(&rcu_torture_wcount[i], 0);
100006 for_each_possible_cpu(cpu) {
100007 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
100008 per_cpu(rcu_torture_count, cpu)[i] = 0;
100009diff --git a/kernel/rcu/tiny.c b/kernel/rcu/tiny.c
100010index cc9ceca..ce075a6 100644
100011--- a/kernel/rcu/tiny.c
100012+++ b/kernel/rcu/tiny.c
100013@@ -42,7 +42,7 @@
100014 /* Forward declarations for tiny_plugin.h. */
100015 struct rcu_ctrlblk;
100016 static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp);
100017-static void rcu_process_callbacks(struct softirq_action *unused);
100018+static void rcu_process_callbacks(void);
100019 static void __call_rcu(struct rcu_head *head,
100020 void (*func)(struct rcu_head *rcu),
100021 struct rcu_ctrlblk *rcp);
100022@@ -210,7 +210,7 @@ static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp)
100023 false));
100024 }
100025
100026-static void rcu_process_callbacks(struct softirq_action *unused)
100027+static __latent_entropy void rcu_process_callbacks(void)
100028 {
100029 __rcu_process_callbacks(&rcu_sched_ctrlblk);
100030 __rcu_process_callbacks(&rcu_bh_ctrlblk);
100031diff --git a/kernel/rcu/tiny_plugin.h b/kernel/rcu/tiny_plugin.h
100032index f94e209..d2985bd 100644
100033--- a/kernel/rcu/tiny_plugin.h
100034+++ b/kernel/rcu/tiny_plugin.h
100035@@ -150,10 +150,10 @@ static void check_cpu_stall(struct rcu_ctrlblk *rcp)
100036 rcp->name, rcp->ticks_this_gp, DYNTICK_TASK_EXIT_IDLE,
100037 jiffies - rcp->gp_start, rcp->qlen);
100038 dump_stack();
100039- ACCESS_ONCE(rcp->jiffies_stall) = jiffies +
100040+ ACCESS_ONCE_RW(rcp->jiffies_stall) = jiffies +
100041 3 * rcu_jiffies_till_stall_check() + 3;
100042 } else if (ULONG_CMP_GE(j, js)) {
100043- ACCESS_ONCE(rcp->jiffies_stall) = jiffies + rcu_jiffies_till_stall_check();
100044+ ACCESS_ONCE_RW(rcp->jiffies_stall) = jiffies + rcu_jiffies_till_stall_check();
100045 }
100046 }
100047
100048@@ -161,7 +161,7 @@ static void reset_cpu_stall_ticks(struct rcu_ctrlblk *rcp)
100049 {
100050 rcp->ticks_this_gp = 0;
100051 rcp->gp_start = jiffies;
100052- ACCESS_ONCE(rcp->jiffies_stall) = jiffies + rcu_jiffies_till_stall_check();
100053+ ACCESS_ONCE_RW(rcp->jiffies_stall) = jiffies + rcu_jiffies_till_stall_check();
100054 }
100055
100056 static void check_cpu_stalls(void)
100057diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
100058index 48d640c..9401d30 100644
100059--- a/kernel/rcu/tree.c
100060+++ b/kernel/rcu/tree.c
100061@@ -268,7 +268,7 @@ static void rcu_momentary_dyntick_idle(void)
100062 */
100063 rdtp = this_cpu_ptr(&rcu_dynticks);
100064 smp_mb__before_atomic(); /* Earlier stuff before QS. */
100065- atomic_add(2, &rdtp->dynticks); /* QS. */
100066+ atomic_add_unchecked(2, &rdtp->dynticks); /* QS. */
100067 smp_mb__after_atomic(); /* Later stuff after QS. */
100068 break;
100069 }
100070@@ -580,9 +580,9 @@ static void rcu_eqs_enter_common(long long oldval, bool user)
100071 rcu_prepare_for_idle();
100072 /* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
100073 smp_mb__before_atomic(); /* See above. */
100074- atomic_inc(&rdtp->dynticks);
100075+ atomic_inc_unchecked(&rdtp->dynticks);
100076 smp_mb__after_atomic(); /* Force ordering with next sojourn. */
100077- WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1);
100078+ WARN_ON_ONCE(atomic_read_unchecked(&rdtp->dynticks) & 0x1);
100079 rcu_dynticks_task_enter();
100080
100081 /*
100082@@ -703,10 +703,10 @@ static void rcu_eqs_exit_common(long long oldval, int user)
100083
100084 rcu_dynticks_task_exit();
100085 smp_mb__before_atomic(); /* Force ordering w/previous sojourn. */
100086- atomic_inc(&rdtp->dynticks);
100087+ atomic_inc_unchecked(&rdtp->dynticks);
100088 /* CPUs seeing atomic_inc() must see later RCU read-side crit sects */
100089 smp_mb__after_atomic(); /* See above. */
100090- WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
100091+ WARN_ON_ONCE(!(atomic_read_unchecked(&rdtp->dynticks) & 0x1));
100092 rcu_cleanup_after_idle();
100093 trace_rcu_dyntick(TPS("End"), oldval, rdtp->dynticks_nesting);
100094 if (!user && !is_idle_task(current)) {
100095@@ -840,12 +840,12 @@ void rcu_nmi_enter(void)
100096 * to be in the outermost NMI handler that interrupted an RCU-idle
100097 * period (observation due to Andy Lutomirski).
100098 */
100099- if (!(atomic_read(&rdtp->dynticks) & 0x1)) {
100100+ if (!(atomic_read_unchecked(&rdtp->dynticks) & 0x1)) {
100101 smp_mb__before_atomic(); /* Force delay from prior write. */
100102- atomic_inc(&rdtp->dynticks);
100103+ atomic_inc_unchecked(&rdtp->dynticks);
100104 /* atomic_inc() before later RCU read-side crit sects */
100105 smp_mb__after_atomic(); /* See above. */
100106- WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
100107+ WARN_ON_ONCE(!(atomic_read_unchecked(&rdtp->dynticks) & 0x1));
100108 incby = 1;
100109 }
100110 rdtp->dynticks_nmi_nesting += incby;
100111@@ -870,7 +870,7 @@ void rcu_nmi_exit(void)
100112 * to us!)
100113 */
100114 WARN_ON_ONCE(rdtp->dynticks_nmi_nesting <= 0);
100115- WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
100116+ WARN_ON_ONCE(!(atomic_read_unchecked(&rdtp->dynticks) & 0x1));
100117
100118 /*
100119 * If the nesting level is not 1, the CPU wasn't RCU-idle, so
100120@@ -885,9 +885,9 @@ void rcu_nmi_exit(void)
100121 rdtp->dynticks_nmi_nesting = 0;
100122 /* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
100123 smp_mb__before_atomic(); /* See above. */
100124- atomic_inc(&rdtp->dynticks);
100125+ atomic_inc_unchecked(&rdtp->dynticks);
100126 smp_mb__after_atomic(); /* Force delay to next write. */
100127- WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1);
100128+ WARN_ON_ONCE(atomic_read_unchecked(&rdtp->dynticks) & 0x1);
100129 }
100130
100131 /**
100132@@ -900,7 +900,7 @@ void rcu_nmi_exit(void)
100133 */
100134 bool notrace __rcu_is_watching(void)
100135 {
100136- return atomic_read(this_cpu_ptr(&rcu_dynticks.dynticks)) & 0x1;
100137+ return atomic_read_unchecked(this_cpu_ptr(&rcu_dynticks.dynticks)) & 0x1;
100138 }
100139
100140 /**
100141@@ -983,7 +983,7 @@ static int rcu_is_cpu_rrupt_from_idle(void)
100142 static int dyntick_save_progress_counter(struct rcu_data *rdp,
100143 bool *isidle, unsigned long *maxj)
100144 {
100145- rdp->dynticks_snap = atomic_add_return(0, &rdp->dynticks->dynticks);
100146+ rdp->dynticks_snap = atomic_add_return_unchecked(0, &rdp->dynticks->dynticks);
100147 rcu_sysidle_check_cpu(rdp, isidle, maxj);
100148 if ((rdp->dynticks_snap & 0x1) == 0) {
100149 trace_rcu_fqs(rdp->rsp->name, rdp->gpnum, rdp->cpu, TPS("dti"));
100150@@ -991,7 +991,7 @@ static int dyntick_save_progress_counter(struct rcu_data *rdp,
100151 } else {
100152 if (ULONG_CMP_LT(ACCESS_ONCE(rdp->gpnum) + ULONG_MAX / 4,
100153 rdp->mynode->gpnum))
100154- ACCESS_ONCE(rdp->gpwrap) = true;
100155+ ACCESS_ONCE_RW(rdp->gpwrap) = true;
100156 return 0;
100157 }
100158 }
100159@@ -1009,7 +1009,7 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp,
100160 int *rcrmp;
100161 unsigned int snap;
100162
100163- curr = (unsigned int)atomic_add_return(0, &rdp->dynticks->dynticks);
100164+ curr = (unsigned int)atomic_add_return_unchecked(0, &rdp->dynticks->dynticks);
100165 snap = (unsigned int)rdp->dynticks_snap;
100166
100167 /*
100168@@ -1072,10 +1072,10 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp,
100169 rdp->rsp->gp_start + jiffies_till_sched_qs) ||
100170 ULONG_CMP_GE(jiffies, rdp->rsp->jiffies_resched)) {
100171 if (!(ACCESS_ONCE(*rcrmp) & rdp->rsp->flavor_mask)) {
100172- ACCESS_ONCE(rdp->cond_resched_completed) =
100173+ ACCESS_ONCE_RW(rdp->cond_resched_completed) =
100174 ACCESS_ONCE(rdp->mynode->completed);
100175 smp_mb(); /* ->cond_resched_completed before *rcrmp. */
100176- ACCESS_ONCE(*rcrmp) =
100177+ ACCESS_ONCE_RW(*rcrmp) =
100178 ACCESS_ONCE(*rcrmp) + rdp->rsp->flavor_mask;
100179 resched_cpu(rdp->cpu); /* Force CPU into scheduler. */
100180 rdp->rsp->jiffies_resched += 5; /* Enable beating. */
100181@@ -1097,7 +1097,7 @@ static void record_gp_stall_check_time(struct rcu_state *rsp)
100182 rsp->gp_start = j;
100183 smp_wmb(); /* Record start time before stall time. */
100184 j1 = rcu_jiffies_till_stall_check();
100185- ACCESS_ONCE(rsp->jiffies_stall) = j + j1;
100186+ ACCESS_ONCE_RW(rsp->jiffies_stall) = j + j1;
100187 rsp->jiffies_resched = j + j1 / 2;
100188 rsp->n_force_qs_gpstart = ACCESS_ONCE(rsp->n_force_qs);
100189 }
100190@@ -1156,7 +1156,7 @@ static void print_other_cpu_stall(struct rcu_state *rsp, unsigned long gpnum)
100191 raw_spin_unlock_irqrestore(&rnp->lock, flags);
100192 return;
100193 }
100194- ACCESS_ONCE(rsp->jiffies_stall) = jiffies + 3 * rcu_jiffies_till_stall_check() + 3;
100195+ ACCESS_ONCE_RW(rsp->jiffies_stall) = jiffies + 3 * rcu_jiffies_till_stall_check() + 3;
100196 raw_spin_unlock_irqrestore(&rnp->lock, flags);
100197
100198 /*
100199@@ -1240,7 +1240,7 @@ static void print_cpu_stall(struct rcu_state *rsp)
100200
100201 raw_spin_lock_irqsave(&rnp->lock, flags);
100202 if (ULONG_CMP_GE(jiffies, ACCESS_ONCE(rsp->jiffies_stall)))
100203- ACCESS_ONCE(rsp->jiffies_stall) = jiffies +
100204+ ACCESS_ONCE_RW(rsp->jiffies_stall) = jiffies +
100205 3 * rcu_jiffies_till_stall_check() + 3;
100206 raw_spin_unlock_irqrestore(&rnp->lock, flags);
100207
100208@@ -1324,7 +1324,7 @@ void rcu_cpu_stall_reset(void)
100209 struct rcu_state *rsp;
100210
100211 for_each_rcu_flavor(rsp)
100212- ACCESS_ONCE(rsp->jiffies_stall) = jiffies + ULONG_MAX / 2;
100213+ ACCESS_ONCE_RW(rsp->jiffies_stall) = jiffies + ULONG_MAX / 2;
100214 }
100215
100216 /*
100217@@ -1671,7 +1671,7 @@ static bool __note_gp_changes(struct rcu_state *rsp, struct rcu_node *rnp,
100218 rdp->rcu_qs_ctr_snap = __this_cpu_read(rcu_qs_ctr);
100219 rdp->qs_pending = !!(rnp->qsmask & rdp->grpmask);
100220 zero_cpu_stall_ticks(rdp);
100221- ACCESS_ONCE(rdp->gpwrap) = false;
100222+ ACCESS_ONCE_RW(rdp->gpwrap) = false;
100223 }
100224 return ret;
100225 }
100226@@ -1706,7 +1706,7 @@ static int rcu_gp_init(struct rcu_state *rsp)
100227 struct rcu_data *rdp;
100228 struct rcu_node *rnp = rcu_get_root(rsp);
100229
100230- ACCESS_ONCE(rsp->gp_activity) = jiffies;
100231+ ACCESS_ONCE_RW(rsp->gp_activity) = jiffies;
100232 rcu_bind_gp_kthread();
100233 raw_spin_lock_irq(&rnp->lock);
100234 smp_mb__after_unlock_lock();
100235@@ -1715,7 +1715,7 @@ static int rcu_gp_init(struct rcu_state *rsp)
100236 raw_spin_unlock_irq(&rnp->lock);
100237 return 0;
100238 }
100239- ACCESS_ONCE(rsp->gp_flags) = 0; /* Clear all flags: New grace period. */
100240+ ACCESS_ONCE_RW(rsp->gp_flags) = 0; /* Clear all flags: New grace period. */
100241
100242 if (WARN_ON_ONCE(rcu_gp_in_progress(rsp))) {
100243 /*
100244@@ -1756,9 +1756,9 @@ static int rcu_gp_init(struct rcu_state *rsp)
100245 rdp = this_cpu_ptr(rsp->rda);
100246 rcu_preempt_check_blocked_tasks(rnp);
100247 rnp->qsmask = rnp->qsmaskinit;
100248- ACCESS_ONCE(rnp->gpnum) = rsp->gpnum;
100249+ ACCESS_ONCE_RW(rnp->gpnum) = rsp->gpnum;
100250 WARN_ON_ONCE(rnp->completed != rsp->completed);
100251- ACCESS_ONCE(rnp->completed) = rsp->completed;
100252+ ACCESS_ONCE_RW(rnp->completed) = rsp->completed;
100253 if (rnp == rdp->mynode)
100254 (void)__note_gp_changes(rsp, rnp, rdp);
100255 rcu_preempt_boost_start_gp(rnp);
100256@@ -1767,7 +1767,7 @@ static int rcu_gp_init(struct rcu_state *rsp)
100257 rnp->grphi, rnp->qsmask);
100258 raw_spin_unlock_irq(&rnp->lock);
100259 cond_resched_rcu_qs();
100260- ACCESS_ONCE(rsp->gp_activity) = jiffies;
100261+ ACCESS_ONCE_RW(rsp->gp_activity) = jiffies;
100262 }
100263
100264 mutex_unlock(&rsp->onoff_mutex);
100265@@ -1784,7 +1784,7 @@ static int rcu_gp_fqs(struct rcu_state *rsp, int fqs_state_in)
100266 unsigned long maxj;
100267 struct rcu_node *rnp = rcu_get_root(rsp);
100268
100269- ACCESS_ONCE(rsp->gp_activity) = jiffies;
100270+ ACCESS_ONCE_RW(rsp->gp_activity) = jiffies;
100271 rsp->n_force_qs++;
100272 if (fqs_state == RCU_SAVE_DYNTICK) {
100273 /* Collect dyntick-idle snapshots. */
100274@@ -1805,7 +1805,7 @@ static int rcu_gp_fqs(struct rcu_state *rsp, int fqs_state_in)
100275 if (ACCESS_ONCE(rsp->gp_flags) & RCU_GP_FLAG_FQS) {
100276 raw_spin_lock_irq(&rnp->lock);
100277 smp_mb__after_unlock_lock();
100278- ACCESS_ONCE(rsp->gp_flags) =
100279+ ACCESS_ONCE_RW(rsp->gp_flags) =
100280 ACCESS_ONCE(rsp->gp_flags) & ~RCU_GP_FLAG_FQS;
100281 raw_spin_unlock_irq(&rnp->lock);
100282 }
100283@@ -1823,7 +1823,7 @@ static void rcu_gp_cleanup(struct rcu_state *rsp)
100284 struct rcu_data *rdp;
100285 struct rcu_node *rnp = rcu_get_root(rsp);
100286
100287- ACCESS_ONCE(rsp->gp_activity) = jiffies;
100288+ ACCESS_ONCE_RW(rsp->gp_activity) = jiffies;
100289 raw_spin_lock_irq(&rnp->lock);
100290 smp_mb__after_unlock_lock();
100291 gp_duration = jiffies - rsp->gp_start;
100292@@ -1852,7 +1852,7 @@ static void rcu_gp_cleanup(struct rcu_state *rsp)
100293 rcu_for_each_node_breadth_first(rsp, rnp) {
100294 raw_spin_lock_irq(&rnp->lock);
100295 smp_mb__after_unlock_lock();
100296- ACCESS_ONCE(rnp->completed) = rsp->gpnum;
100297+ ACCESS_ONCE_RW(rnp->completed) = rsp->gpnum;
100298 rdp = this_cpu_ptr(rsp->rda);
100299 if (rnp == rdp->mynode)
100300 needgp = __note_gp_changes(rsp, rnp, rdp) || needgp;
100301@@ -1860,7 +1860,7 @@ static void rcu_gp_cleanup(struct rcu_state *rsp)
100302 nocb += rcu_future_gp_cleanup(rsp, rnp);
100303 raw_spin_unlock_irq(&rnp->lock);
100304 cond_resched_rcu_qs();
100305- ACCESS_ONCE(rsp->gp_activity) = jiffies;
100306+ ACCESS_ONCE_RW(rsp->gp_activity) = jiffies;
100307 }
100308 rnp = rcu_get_root(rsp);
100309 raw_spin_lock_irq(&rnp->lock);
100310@@ -1868,14 +1868,14 @@ static void rcu_gp_cleanup(struct rcu_state *rsp)
100311 rcu_nocb_gp_set(rnp, nocb);
100312
100313 /* Declare grace period done. */
100314- ACCESS_ONCE(rsp->completed) = rsp->gpnum;
100315+ ACCESS_ONCE_RW(rsp->completed) = rsp->gpnum;
100316 trace_rcu_grace_period(rsp->name, rsp->completed, TPS("end"));
100317 rsp->fqs_state = RCU_GP_IDLE;
100318 rdp = this_cpu_ptr(rsp->rda);
100319 /* Advance CBs to reduce false positives below. */
100320 needgp = rcu_advance_cbs(rsp, rnp, rdp) || needgp;
100321 if (needgp || cpu_needs_another_gp(rsp, rdp)) {
100322- ACCESS_ONCE(rsp->gp_flags) = RCU_GP_FLAG_INIT;
100323+ ACCESS_ONCE_RW(rsp->gp_flags) = RCU_GP_FLAG_INIT;
100324 trace_rcu_grace_period(rsp->name,
100325 ACCESS_ONCE(rsp->gpnum),
100326 TPS("newreq"));
100327@@ -1910,7 +1910,7 @@ static int __noreturn rcu_gp_kthread(void *arg)
100328 if (rcu_gp_init(rsp))
100329 break;
100330 cond_resched_rcu_qs();
100331- ACCESS_ONCE(rsp->gp_activity) = jiffies;
100332+ ACCESS_ONCE_RW(rsp->gp_activity) = jiffies;
100333 WARN_ON(signal_pending(current));
100334 trace_rcu_grace_period(rsp->name,
100335 ACCESS_ONCE(rsp->gpnum),
100336@@ -1954,11 +1954,11 @@ static int __noreturn rcu_gp_kthread(void *arg)
100337 ACCESS_ONCE(rsp->gpnum),
100338 TPS("fqsend"));
100339 cond_resched_rcu_qs();
100340- ACCESS_ONCE(rsp->gp_activity) = jiffies;
100341+ ACCESS_ONCE_RW(rsp->gp_activity) = jiffies;
100342 } else {
100343 /* Deal with stray signal. */
100344 cond_resched_rcu_qs();
100345- ACCESS_ONCE(rsp->gp_activity) = jiffies;
100346+ ACCESS_ONCE_RW(rsp->gp_activity) = jiffies;
100347 WARN_ON(signal_pending(current));
100348 trace_rcu_grace_period(rsp->name,
100349 ACCESS_ONCE(rsp->gpnum),
100350@@ -2003,7 +2003,7 @@ rcu_start_gp_advanced(struct rcu_state *rsp, struct rcu_node *rnp,
100351 */
100352 return false;
100353 }
100354- ACCESS_ONCE(rsp->gp_flags) = RCU_GP_FLAG_INIT;
100355+ ACCESS_ONCE_RW(rsp->gp_flags) = RCU_GP_FLAG_INIT;
100356 trace_rcu_grace_period(rsp->name, ACCESS_ONCE(rsp->gpnum),
100357 TPS("newreq"));
100358
100359@@ -2228,7 +2228,7 @@ rcu_send_cbs_to_orphanage(int cpu, struct rcu_state *rsp,
100360 rsp->qlen += rdp->qlen;
100361 rdp->n_cbs_orphaned += rdp->qlen;
100362 rdp->qlen_lazy = 0;
100363- ACCESS_ONCE(rdp->qlen) = 0;
100364+ ACCESS_ONCE_RW(rdp->qlen) = 0;
100365 }
100366
100367 /*
100368@@ -2490,7 +2490,7 @@ static void rcu_do_batch(struct rcu_state *rsp, struct rcu_data *rdp)
100369 }
100370 smp_mb(); /* List handling before counting for rcu_barrier(). */
100371 rdp->qlen_lazy -= count_lazy;
100372- ACCESS_ONCE(rdp->qlen) = rdp->qlen - count;
100373+ ACCESS_ONCE_RW(rdp->qlen) = rdp->qlen - count;
100374 rdp->n_cbs_invoked += count;
100375
100376 /* Reinstate batch limit if we have worked down the excess. */
100377@@ -2647,7 +2647,7 @@ static void force_quiescent_state(struct rcu_state *rsp)
100378 raw_spin_unlock_irqrestore(&rnp_old->lock, flags);
100379 return; /* Someone beat us to it. */
100380 }
100381- ACCESS_ONCE(rsp->gp_flags) =
100382+ ACCESS_ONCE_RW(rsp->gp_flags) =
100383 ACCESS_ONCE(rsp->gp_flags) | RCU_GP_FLAG_FQS;
100384 raw_spin_unlock_irqrestore(&rnp_old->lock, flags);
100385 rcu_gp_kthread_wake(rsp);
100386@@ -2693,7 +2693,7 @@ __rcu_process_callbacks(struct rcu_state *rsp)
100387 /*
100388 * Do RCU core processing for the current CPU.
100389 */
100390-static void rcu_process_callbacks(struct softirq_action *unused)
100391+static void rcu_process_callbacks(void)
100392 {
100393 struct rcu_state *rsp;
100394
100395@@ -2805,7 +2805,7 @@ __call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu),
100396 WARN_ON_ONCE((unsigned long)head & 0x1); /* Misaligned rcu_head! */
100397 if (debug_rcu_head_queue(head)) {
100398 /* Probable double call_rcu(), so leak the callback. */
100399- ACCESS_ONCE(head->func) = rcu_leak_callback;
100400+ ACCESS_ONCE_RW(head->func) = rcu_leak_callback;
100401 WARN_ONCE(1, "__call_rcu(): Leaked duplicate callback\n");
100402 return;
100403 }
100404@@ -2833,7 +2833,7 @@ __call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu),
100405 local_irq_restore(flags);
100406 return;
100407 }
100408- ACCESS_ONCE(rdp->qlen) = rdp->qlen + 1;
100409+ ACCESS_ONCE_RW(rdp->qlen) = rdp->qlen + 1;
100410 if (lazy)
100411 rdp->qlen_lazy++;
100412 else
100413@@ -3106,11 +3106,11 @@ void synchronize_sched_expedited(void)
100414 * counter wrap on a 32-bit system. Quite a few more CPUs would of
100415 * course be required on a 64-bit system.
100416 */
100417- if (ULONG_CMP_GE((ulong)atomic_long_read(&rsp->expedited_start),
100418+ if (ULONG_CMP_GE((ulong)atomic_long_read_unchecked(&rsp->expedited_start),
100419 (ulong)atomic_long_read(&rsp->expedited_done) +
100420 ULONG_MAX / 8)) {
100421 synchronize_sched();
100422- atomic_long_inc(&rsp->expedited_wrap);
100423+ atomic_long_inc_unchecked(&rsp->expedited_wrap);
100424 return;
100425 }
100426
100427@@ -3118,12 +3118,12 @@ void synchronize_sched_expedited(void)
100428 * Take a ticket. Note that atomic_inc_return() implies a
100429 * full memory barrier.
100430 */
100431- snap = atomic_long_inc_return(&rsp->expedited_start);
100432+ snap = atomic_long_inc_return_unchecked(&rsp->expedited_start);
100433 firstsnap = snap;
100434 if (!try_get_online_cpus()) {
100435 /* CPU hotplug operation in flight, fall back to normal GP. */
100436 wait_rcu_gp(call_rcu_sched);
100437- atomic_long_inc(&rsp->expedited_normal);
100438+ atomic_long_inc_unchecked(&rsp->expedited_normal);
100439 return;
100440 }
100441 WARN_ON_ONCE(cpu_is_offline(raw_smp_processor_id()));
100442@@ -3136,7 +3136,7 @@ void synchronize_sched_expedited(void)
100443 for_each_cpu(cpu, cm) {
100444 struct rcu_dynticks *rdtp = &per_cpu(rcu_dynticks, cpu);
100445
100446- if (!(atomic_add_return(0, &rdtp->dynticks) & 0x1))
100447+ if (!(atomic_add_return_unchecked(0, &rdtp->dynticks) & 0x1))
100448 cpumask_clear_cpu(cpu, cm);
100449 }
100450 if (cpumask_weight(cm) == 0)
100451@@ -3151,14 +3151,14 @@ void synchronize_sched_expedited(void)
100452 synchronize_sched_expedited_cpu_stop,
100453 NULL) == -EAGAIN) {
100454 put_online_cpus();
100455- atomic_long_inc(&rsp->expedited_tryfail);
100456+ atomic_long_inc_unchecked(&rsp->expedited_tryfail);
100457
100458 /* Check to see if someone else did our work for us. */
100459 s = atomic_long_read(&rsp->expedited_done);
100460 if (ULONG_CMP_GE((ulong)s, (ulong)firstsnap)) {
100461 /* ensure test happens before caller kfree */
100462 smp_mb__before_atomic(); /* ^^^ */
100463- atomic_long_inc(&rsp->expedited_workdone1);
100464+ atomic_long_inc_unchecked(&rsp->expedited_workdone1);
100465 free_cpumask_var(cm);
100466 return;
100467 }
100468@@ -3168,7 +3168,7 @@ void synchronize_sched_expedited(void)
100469 udelay(trycount * num_online_cpus());
100470 } else {
100471 wait_rcu_gp(call_rcu_sched);
100472- atomic_long_inc(&rsp->expedited_normal);
100473+ atomic_long_inc_unchecked(&rsp->expedited_normal);
100474 free_cpumask_var(cm);
100475 return;
100476 }
100477@@ -3178,7 +3178,7 @@ void synchronize_sched_expedited(void)
100478 if (ULONG_CMP_GE((ulong)s, (ulong)firstsnap)) {
100479 /* ensure test happens before caller kfree */
100480 smp_mb__before_atomic(); /* ^^^ */
100481- atomic_long_inc(&rsp->expedited_workdone2);
100482+ atomic_long_inc_unchecked(&rsp->expedited_workdone2);
100483 free_cpumask_var(cm);
100484 return;
100485 }
100486@@ -3193,14 +3193,14 @@ void synchronize_sched_expedited(void)
100487 if (!try_get_online_cpus()) {
100488 /* CPU hotplug operation in flight, use normal GP. */
100489 wait_rcu_gp(call_rcu_sched);
100490- atomic_long_inc(&rsp->expedited_normal);
100491+ atomic_long_inc_unchecked(&rsp->expedited_normal);
100492 free_cpumask_var(cm);
100493 return;
100494 }
100495- snap = atomic_long_read(&rsp->expedited_start);
100496+ snap = atomic_long_read_unchecked(&rsp->expedited_start);
100497 smp_mb(); /* ensure read is before try_stop_cpus(). */
100498 }
100499- atomic_long_inc(&rsp->expedited_stoppedcpus);
100500+ atomic_long_inc_unchecked(&rsp->expedited_stoppedcpus);
100501
100502 all_cpus_idle:
100503 free_cpumask_var(cm);
100504@@ -3212,16 +3212,16 @@ all_cpus_idle:
100505 * than we did already did their update.
100506 */
100507 do {
100508- atomic_long_inc(&rsp->expedited_done_tries);
100509+ atomic_long_inc_unchecked(&rsp->expedited_done_tries);
100510 s = atomic_long_read(&rsp->expedited_done);
100511 if (ULONG_CMP_GE((ulong)s, (ulong)snap)) {
100512 /* ensure test happens before caller kfree */
100513 smp_mb__before_atomic(); /* ^^^ */
100514- atomic_long_inc(&rsp->expedited_done_lost);
100515+ atomic_long_inc_unchecked(&rsp->expedited_done_lost);
100516 break;
100517 }
100518 } while (atomic_long_cmpxchg(&rsp->expedited_done, s, snap) != s);
100519- atomic_long_inc(&rsp->expedited_done_exit);
100520+ atomic_long_inc_unchecked(&rsp->expedited_done_exit);
100521
100522 put_online_cpus();
100523 }
100524@@ -3431,7 +3431,7 @@ static void _rcu_barrier(struct rcu_state *rsp)
100525 * ACCESS_ONCE() to prevent the compiler from speculating
100526 * the increment to precede the early-exit check.
100527 */
100528- ACCESS_ONCE(rsp->n_barrier_done) = rsp->n_barrier_done + 1;
100529+ ACCESS_ONCE_RW(rsp->n_barrier_done) = rsp->n_barrier_done + 1;
100530 WARN_ON_ONCE((rsp->n_barrier_done & 0x1) != 1);
100531 _rcu_barrier_trace(rsp, "Inc1", -1, rsp->n_barrier_done);
100532 smp_mb(); /* Order ->n_barrier_done increment with below mechanism. */
100533@@ -3487,7 +3487,7 @@ static void _rcu_barrier(struct rcu_state *rsp)
100534
100535 /* Increment ->n_barrier_done to prevent duplicate work. */
100536 smp_mb(); /* Keep increment after above mechanism. */
100537- ACCESS_ONCE(rsp->n_barrier_done) = rsp->n_barrier_done + 1;
100538+ ACCESS_ONCE_RW(rsp->n_barrier_done) = rsp->n_barrier_done + 1;
100539 WARN_ON_ONCE((rsp->n_barrier_done & 0x1) != 0);
100540 _rcu_barrier_trace(rsp, "Inc2", -1, rsp->n_barrier_done);
100541 smp_mb(); /* Keep increment before caller's subsequent code. */
100542@@ -3532,7 +3532,7 @@ rcu_boot_init_percpu_data(int cpu, struct rcu_state *rsp)
100543 rdp->grpmask = 1UL << (cpu - rdp->mynode->grplo);
100544 rdp->dynticks = &per_cpu(rcu_dynticks, cpu);
100545 WARN_ON_ONCE(rdp->dynticks->dynticks_nesting != DYNTICK_TASK_EXIT_IDLE);
100546- WARN_ON_ONCE(atomic_read(&rdp->dynticks->dynticks) != 1);
100547+ WARN_ON_ONCE(atomic_read_unchecked(&rdp->dynticks->dynticks) != 1);
100548 rdp->cpu = cpu;
100549 rdp->rsp = rsp;
100550 rcu_boot_init_nocb_percpu_data(rdp);
100551@@ -3565,8 +3565,8 @@ rcu_init_percpu_data(int cpu, struct rcu_state *rsp)
100552 init_callback_list(rdp); /* Re-enable callbacks on this CPU. */
100553 rdp->dynticks->dynticks_nesting = DYNTICK_TASK_EXIT_IDLE;
100554 rcu_sysidle_init_percpu_data(rdp->dynticks);
100555- atomic_set(&rdp->dynticks->dynticks,
100556- (atomic_read(&rdp->dynticks->dynticks) & ~0x1) + 1);
100557+ atomic_set_unchecked(&rdp->dynticks->dynticks,
100558+ (atomic_read_unchecked(&rdp->dynticks->dynticks) & ~0x1) + 1);
100559 raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
100560
100561 /* Add CPU to rcu_node bitmasks. */
100562diff --git a/kernel/rcu/tree.h b/kernel/rcu/tree.h
100563index 119de39..f07d31a 100644
100564--- a/kernel/rcu/tree.h
100565+++ b/kernel/rcu/tree.h
100566@@ -86,11 +86,11 @@ struct rcu_dynticks {
100567 long long dynticks_nesting; /* Track irq/process nesting level. */
100568 /* Process level is worth LLONG_MAX/2. */
100569 int dynticks_nmi_nesting; /* Track NMI nesting level. */
100570- atomic_t dynticks; /* Even value for idle, else odd. */
100571+ atomic_unchecked_t dynticks;/* Even value for idle, else odd. */
100572 #ifdef CONFIG_NO_HZ_FULL_SYSIDLE
100573 long long dynticks_idle_nesting;
100574 /* irq/process nesting level from idle. */
100575- atomic_t dynticks_idle; /* Even value for idle, else odd. */
100576+ atomic_unchecked_t dynticks_idle;/* Even value for idle, else odd. */
100577 /* "Idle" excludes userspace execution. */
100578 unsigned long dynticks_idle_jiffies;
100579 /* End of last non-NMI non-idle period. */
100580@@ -457,17 +457,17 @@ struct rcu_state {
100581 /* _rcu_barrier(). */
100582 /* End of fields guarded by barrier_mutex. */
100583
100584- atomic_long_t expedited_start; /* Starting ticket. */
100585- atomic_long_t expedited_done; /* Done ticket. */
100586- atomic_long_t expedited_wrap; /* # near-wrap incidents. */
100587- atomic_long_t expedited_tryfail; /* # acquisition failures. */
100588- atomic_long_t expedited_workdone1; /* # done by others #1. */
100589- atomic_long_t expedited_workdone2; /* # done by others #2. */
100590- atomic_long_t expedited_normal; /* # fallbacks to normal. */
100591- atomic_long_t expedited_stoppedcpus; /* # successful stop_cpus. */
100592- atomic_long_t expedited_done_tries; /* # tries to update _done. */
100593- atomic_long_t expedited_done_lost; /* # times beaten to _done. */
100594- atomic_long_t expedited_done_exit; /* # times exited _done loop. */
100595+ atomic_long_unchecked_t expedited_start; /* Starting ticket. */
100596+ atomic_long_t expedited_done; /* Done ticket. */
100597+ atomic_long_unchecked_t expedited_wrap; /* # near-wrap incidents. */
100598+ atomic_long_unchecked_t expedited_tryfail; /* # acquisition failures. */
100599+ atomic_long_unchecked_t expedited_workdone1; /* # done by others #1. */
100600+ atomic_long_unchecked_t expedited_workdone2; /* # done by others #2. */
100601+ atomic_long_unchecked_t expedited_normal; /* # fallbacks to normal. */
100602+ atomic_long_unchecked_t expedited_stoppedcpus; /* # successful stop_cpus. */
100603+ atomic_long_unchecked_t expedited_done_tries; /* # tries to update _done. */
100604+ atomic_long_unchecked_t expedited_done_lost; /* # times beaten to _done. */
100605+ atomic_long_unchecked_t expedited_done_exit; /* # times exited _done loop. */
100606
100607 unsigned long jiffies_force_qs; /* Time at which to invoke */
100608 /* force_quiescent_state(). */
100609diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h
100610index 0a571e9..fbfd611 100644
100611--- a/kernel/rcu/tree_plugin.h
100612+++ b/kernel/rcu/tree_plugin.h
100613@@ -619,7 +619,7 @@ static int rcu_preempted_readers_exp(struct rcu_node *rnp)
100614 static int sync_rcu_preempt_exp_done(struct rcu_node *rnp)
100615 {
100616 return !rcu_preempted_readers_exp(rnp) &&
100617- ACCESS_ONCE(rnp->expmask) == 0;
100618+ ACCESS_ONCE_RW(rnp->expmask) == 0;
100619 }
100620
100621 /*
100622@@ -780,7 +780,7 @@ void synchronize_rcu_expedited(void)
100623
100624 /* Clean up and exit. */
100625 smp_mb(); /* ensure expedited GP seen before counter increment. */
100626- ACCESS_ONCE(sync_rcu_preempt_exp_count) =
100627+ ACCESS_ONCE_RW(sync_rcu_preempt_exp_count) =
100628 sync_rcu_preempt_exp_count + 1;
100629 unlock_mb_ret:
100630 mutex_unlock(&sync_rcu_preempt_exp_mutex);
100631@@ -1290,7 +1290,7 @@ static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu)
100632 free_cpumask_var(cm);
100633 }
100634
100635-static struct smp_hotplug_thread rcu_cpu_thread_spec = {
100636+static struct smp_hotplug_thread rcu_cpu_thread_spec __read_only = {
100637 .store = &rcu_cpu_kthread_task,
100638 .thread_should_run = rcu_cpu_kthread_should_run,
100639 .thread_fn = rcu_cpu_kthread,
100640@@ -1761,7 +1761,7 @@ static void print_cpu_stall_info(struct rcu_state *rsp, int cpu)
100641 print_cpu_stall_fast_no_hz(fast_no_hz, cpu);
100642 pr_err("\t%d: (%lu %s) idle=%03x/%llx/%d softirq=%u/%u fqs=%ld %s\n",
100643 cpu, ticks_value, ticks_title,
100644- atomic_read(&rdtp->dynticks) & 0xfff,
100645+ atomic_read_unchecked(&rdtp->dynticks) & 0xfff,
100646 rdtp->dynticks_nesting, rdtp->dynticks_nmi_nesting,
100647 rdp->softirq_snap, kstat_softirqs_cpu(RCU_SOFTIRQ, cpu),
100648 ACCESS_ONCE(rsp->n_force_qs) - rsp->n_force_qs_gpstart,
100649@@ -1906,7 +1906,7 @@ static void wake_nocb_leader(struct rcu_data *rdp, bool force)
100650 return;
100651 if (ACCESS_ONCE(rdp_leader->nocb_leader_sleep) || force) {
100652 /* Prior smp_mb__after_atomic() orders against prior enqueue. */
100653- ACCESS_ONCE(rdp_leader->nocb_leader_sleep) = false;
100654+ ACCESS_ONCE_RW(rdp_leader->nocb_leader_sleep) = false;
100655 wake_up(&rdp_leader->nocb_wq);
100656 }
100657 }
100658@@ -1978,7 +1978,7 @@ static void __call_rcu_nocb_enqueue(struct rcu_data *rdp,
100659 atomic_long_add(rhcount, &rdp->nocb_q_count);
100660 /* rcu_barrier() relies on ->nocb_q_count add before xchg. */
100661 old_rhpp = xchg(&rdp->nocb_tail, rhtp);
100662- ACCESS_ONCE(*old_rhpp) = rhp;
100663+ ACCESS_ONCE_RW(*old_rhpp) = rhp;
100664 atomic_long_add(rhcount_lazy, &rdp->nocb_q_count_lazy);
100665 smp_mb__after_atomic(); /* Store *old_rhpp before _wake test. */
100666
100667@@ -2167,7 +2167,7 @@ wait_again:
100668 continue; /* No CBs here, try next follower. */
100669
100670 /* Move callbacks to wait-for-GP list, which is empty. */
100671- ACCESS_ONCE(rdp->nocb_head) = NULL;
100672+ ACCESS_ONCE_RW(rdp->nocb_head) = NULL;
100673 rdp->nocb_gp_tail = xchg(&rdp->nocb_tail, &rdp->nocb_head);
100674 gotcbs = true;
100675 }
100676@@ -2288,7 +2288,7 @@ static int rcu_nocb_kthread(void *arg)
100677 list = ACCESS_ONCE(rdp->nocb_follower_head);
100678 BUG_ON(!list);
100679 trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu, "WokeNonEmpty");
100680- ACCESS_ONCE(rdp->nocb_follower_head) = NULL;
100681+ ACCESS_ONCE_RW(rdp->nocb_follower_head) = NULL;
100682 tail = xchg(&rdp->nocb_follower_tail, &rdp->nocb_follower_head);
100683
100684 /* Each pass through the following loop invokes a callback. */
100685@@ -2338,7 +2338,7 @@ static void do_nocb_deferred_wakeup(struct rcu_data *rdp)
100686 if (!rcu_nocb_need_deferred_wakeup(rdp))
100687 return;
100688 ndw = ACCESS_ONCE(rdp->nocb_defer_wakeup);
100689- ACCESS_ONCE(rdp->nocb_defer_wakeup) = RCU_NOGP_WAKE_NOT;
100690+ ACCESS_ONCE_RW(rdp->nocb_defer_wakeup) = RCU_NOGP_WAKE_NOT;
100691 wake_nocb_leader(rdp, ndw == RCU_NOGP_WAKE_FORCE);
100692 trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu, TPS("DeferredWake"));
100693 }
100694@@ -2461,7 +2461,7 @@ static void rcu_spawn_one_nocb_kthread(struct rcu_state *rsp, int cpu)
100695 t = kthread_run(rcu_nocb_kthread, rdp_spawn,
100696 "rcuo%c/%d", rsp->abbr, cpu);
100697 BUG_ON(IS_ERR(t));
100698- ACCESS_ONCE(rdp_spawn->nocb_kthread) = t;
100699+ ACCESS_ONCE_RW(rdp_spawn->nocb_kthread) = t;
100700 }
100701
100702 /*
100703@@ -2666,11 +2666,11 @@ static void rcu_sysidle_enter(int irq)
100704
100705 /* Record start of fully idle period. */
100706 j = jiffies;
100707- ACCESS_ONCE(rdtp->dynticks_idle_jiffies) = j;
100708+ ACCESS_ONCE_RW(rdtp->dynticks_idle_jiffies) = j;
100709 smp_mb__before_atomic();
100710- atomic_inc(&rdtp->dynticks_idle);
100711+ atomic_inc_unchecked(&rdtp->dynticks_idle);
100712 smp_mb__after_atomic();
100713- WARN_ON_ONCE(atomic_read(&rdtp->dynticks_idle) & 0x1);
100714+ WARN_ON_ONCE(atomic_read_unchecked(&rdtp->dynticks_idle) & 0x1);
100715 }
100716
100717 /*
100718@@ -2741,9 +2741,9 @@ static void rcu_sysidle_exit(int irq)
100719
100720 /* Record end of idle period. */
100721 smp_mb__before_atomic();
100722- atomic_inc(&rdtp->dynticks_idle);
100723+ atomic_inc_unchecked(&rdtp->dynticks_idle);
100724 smp_mb__after_atomic();
100725- WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks_idle) & 0x1));
100726+ WARN_ON_ONCE(!(atomic_read_unchecked(&rdtp->dynticks_idle) & 0x1));
100727
100728 /*
100729 * If we are the timekeeping CPU, we are permitted to be non-idle
100730@@ -2788,7 +2788,7 @@ static void rcu_sysidle_check_cpu(struct rcu_data *rdp, bool *isidle,
100731 WARN_ON_ONCE(smp_processor_id() != tick_do_timer_cpu);
100732
100733 /* Pick up current idle and NMI-nesting counter and check. */
100734- cur = atomic_read(&rdtp->dynticks_idle);
100735+ cur = atomic_read_unchecked(&rdtp->dynticks_idle);
100736 if (cur & 0x1) {
100737 *isidle = false; /* We are not idle! */
100738 return;
100739@@ -2837,7 +2837,7 @@ static void rcu_sysidle(unsigned long j)
100740 case RCU_SYSIDLE_NOT:
100741
100742 /* First time all are idle, so note a short idle period. */
100743- ACCESS_ONCE(full_sysidle_state) = RCU_SYSIDLE_SHORT;
100744+ ACCESS_ONCE_RW(full_sysidle_state) = RCU_SYSIDLE_SHORT;
100745 break;
100746
100747 case RCU_SYSIDLE_SHORT:
100748@@ -2875,7 +2875,7 @@ static void rcu_sysidle_cancel(void)
100749 {
100750 smp_mb();
100751 if (full_sysidle_state > RCU_SYSIDLE_SHORT)
100752- ACCESS_ONCE(full_sysidle_state) = RCU_SYSIDLE_NOT;
100753+ ACCESS_ONCE_RW(full_sysidle_state) = RCU_SYSIDLE_NOT;
100754 }
100755
100756 /*
100757@@ -2927,7 +2927,7 @@ static void rcu_sysidle_cb(struct rcu_head *rhp)
100758 smp_mb(); /* grace period precedes setting inuse. */
100759
100760 rshp = container_of(rhp, struct rcu_sysidle_head, rh);
100761- ACCESS_ONCE(rshp->inuse) = 0;
100762+ ACCESS_ONCE_RW(rshp->inuse) = 0;
100763 }
100764
100765 /*
100766@@ -3080,7 +3080,7 @@ static void rcu_bind_gp_kthread(void)
100767 static void rcu_dynticks_task_enter(void)
100768 {
100769 #if defined(CONFIG_TASKS_RCU) && defined(CONFIG_NO_HZ_FULL)
100770- ACCESS_ONCE(current->rcu_tasks_idle_cpu) = smp_processor_id();
100771+ ACCESS_ONCE_RW(current->rcu_tasks_idle_cpu) = smp_processor_id();
100772 #endif /* #if defined(CONFIG_TASKS_RCU) && defined(CONFIG_NO_HZ_FULL) */
100773 }
100774
100775@@ -3088,6 +3088,6 @@ static void rcu_dynticks_task_enter(void)
100776 static void rcu_dynticks_task_exit(void)
100777 {
100778 #if defined(CONFIG_TASKS_RCU) && defined(CONFIG_NO_HZ_FULL)
100779- ACCESS_ONCE(current->rcu_tasks_idle_cpu) = -1;
100780+ ACCESS_ONCE_RW(current->rcu_tasks_idle_cpu) = -1;
100781 #endif /* #if defined(CONFIG_TASKS_RCU) && defined(CONFIG_NO_HZ_FULL) */
100782 }
100783diff --git a/kernel/rcu/tree_trace.c b/kernel/rcu/tree_trace.c
100784index fbb6240..f6c5097 100644
100785--- a/kernel/rcu/tree_trace.c
100786+++ b/kernel/rcu/tree_trace.c
100787@@ -125,7 +125,7 @@ static void print_one_rcu_data(struct seq_file *m, struct rcu_data *rdp)
100788 rdp->rcu_qs_ctr_snap == per_cpu(rcu_qs_ctr, rdp->cpu),
100789 rdp->qs_pending);
100790 seq_printf(m, " dt=%d/%llx/%d df=%lu",
100791- atomic_read(&rdp->dynticks->dynticks),
100792+ atomic_read_unchecked(&rdp->dynticks->dynticks),
100793 rdp->dynticks->dynticks_nesting,
100794 rdp->dynticks->dynticks_nmi_nesting,
100795 rdp->dynticks_fqs);
100796@@ -186,17 +186,17 @@ static int show_rcuexp(struct seq_file *m, void *v)
100797 struct rcu_state *rsp = (struct rcu_state *)m->private;
100798
100799 seq_printf(m, "s=%lu d=%lu w=%lu tf=%lu wd1=%lu wd2=%lu n=%lu sc=%lu dt=%lu dl=%lu dx=%lu\n",
100800- atomic_long_read(&rsp->expedited_start),
100801+ atomic_long_read_unchecked(&rsp->expedited_start),
100802 atomic_long_read(&rsp->expedited_done),
100803- atomic_long_read(&rsp->expedited_wrap),
100804- atomic_long_read(&rsp->expedited_tryfail),
100805- atomic_long_read(&rsp->expedited_workdone1),
100806- atomic_long_read(&rsp->expedited_workdone2),
100807- atomic_long_read(&rsp->expedited_normal),
100808- atomic_long_read(&rsp->expedited_stoppedcpus),
100809- atomic_long_read(&rsp->expedited_done_tries),
100810- atomic_long_read(&rsp->expedited_done_lost),
100811- atomic_long_read(&rsp->expedited_done_exit));
100812+ atomic_long_read_unchecked(&rsp->expedited_wrap),
100813+ atomic_long_read_unchecked(&rsp->expedited_tryfail),
100814+ atomic_long_read_unchecked(&rsp->expedited_workdone1),
100815+ atomic_long_read_unchecked(&rsp->expedited_workdone2),
100816+ atomic_long_read_unchecked(&rsp->expedited_normal),
100817+ atomic_long_read_unchecked(&rsp->expedited_stoppedcpus),
100818+ atomic_long_read_unchecked(&rsp->expedited_done_tries),
100819+ atomic_long_read_unchecked(&rsp->expedited_done_lost),
100820+ atomic_long_read_unchecked(&rsp->expedited_done_exit));
100821 return 0;
100822 }
100823
100824diff --git a/kernel/rcu/update.c b/kernel/rcu/update.c
100825index e0d31a3..f4dafe3 100644
100826--- a/kernel/rcu/update.c
100827+++ b/kernel/rcu/update.c
100828@@ -342,10 +342,10 @@ int rcu_jiffies_till_stall_check(void)
100829 * for CONFIG_RCU_CPU_STALL_TIMEOUT.
100830 */
100831 if (till_stall_check < 3) {
100832- ACCESS_ONCE(rcu_cpu_stall_timeout) = 3;
100833+ ACCESS_ONCE_RW(rcu_cpu_stall_timeout) = 3;
100834 till_stall_check = 3;
100835 } else if (till_stall_check > 300) {
100836- ACCESS_ONCE(rcu_cpu_stall_timeout) = 300;
100837+ ACCESS_ONCE_RW(rcu_cpu_stall_timeout) = 300;
100838 till_stall_check = 300;
100839 }
100840 return till_stall_check * HZ + RCU_STALL_DELAY_DELTA;
100841@@ -501,7 +501,7 @@ static void check_holdout_task(struct task_struct *t,
100842 !ACCESS_ONCE(t->on_rq) ||
100843 (IS_ENABLED(CONFIG_NO_HZ_FULL) &&
100844 !is_idle_task(t) && t->rcu_tasks_idle_cpu >= 0)) {
100845- ACCESS_ONCE(t->rcu_tasks_holdout) = false;
100846+ ACCESS_ONCE_RW(t->rcu_tasks_holdout) = false;
100847 list_del_init(&t->rcu_tasks_holdout_list);
100848 put_task_struct(t);
100849 return;
100850@@ -589,7 +589,7 @@ static int __noreturn rcu_tasks_kthread(void *arg)
100851 !is_idle_task(t)) {
100852 get_task_struct(t);
100853 t->rcu_tasks_nvcsw = ACCESS_ONCE(t->nvcsw);
100854- ACCESS_ONCE(t->rcu_tasks_holdout) = true;
100855+ ACCESS_ONCE_RW(t->rcu_tasks_holdout) = true;
100856 list_add(&t->rcu_tasks_holdout_list,
100857 &rcu_tasks_holdouts);
100858 }
100859@@ -686,7 +686,7 @@ static void rcu_spawn_tasks_kthread(void)
100860 t = kthread_run(rcu_tasks_kthread, NULL, "rcu_tasks_kthread");
100861 BUG_ON(IS_ERR(t));
100862 smp_mb(); /* Ensure others see full kthread. */
100863- ACCESS_ONCE(rcu_tasks_kthread_ptr) = t;
100864+ ACCESS_ONCE_RW(rcu_tasks_kthread_ptr) = t;
100865 mutex_unlock(&rcu_tasks_kthread_mutex);
100866 }
100867
100868diff --git a/kernel/resource.c b/kernel/resource.c
100869index 19f2357..ebe7f35 100644
100870--- a/kernel/resource.c
100871+++ b/kernel/resource.c
100872@@ -162,8 +162,18 @@ static const struct file_operations proc_iomem_operations = {
100873
100874 static int __init ioresources_init(void)
100875 {
100876+#ifdef CONFIG_GRKERNSEC_PROC_ADD
100877+#ifdef CONFIG_GRKERNSEC_PROC_USER
100878+ proc_create("ioports", S_IRUSR, NULL, &proc_ioports_operations);
100879+ proc_create("iomem", S_IRUSR, NULL, &proc_iomem_operations);
100880+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
100881+ proc_create("ioports", S_IRUSR | S_IRGRP, NULL, &proc_ioports_operations);
100882+ proc_create("iomem", S_IRUSR | S_IRGRP, NULL, &proc_iomem_operations);
100883+#endif
100884+#else
100885 proc_create("ioports", 0, NULL, &proc_ioports_operations);
100886 proc_create("iomem", 0, NULL, &proc_iomem_operations);
100887+#endif
100888 return 0;
100889 }
100890 __initcall(ioresources_init);
100891diff --git a/kernel/sched/auto_group.c b/kernel/sched/auto_group.c
100892index eae160d..c9aa22e 100644
100893--- a/kernel/sched/auto_group.c
100894+++ b/kernel/sched/auto_group.c
100895@@ -11,7 +11,7 @@
100896
100897 unsigned int __read_mostly sysctl_sched_autogroup_enabled = 1;
100898 static struct autogroup autogroup_default;
100899-static atomic_t autogroup_seq_nr;
100900+static atomic_unchecked_t autogroup_seq_nr;
100901
100902 void __init autogroup_init(struct task_struct *init_task)
100903 {
100904@@ -79,7 +79,7 @@ static inline struct autogroup *autogroup_create(void)
100905
100906 kref_init(&ag->kref);
100907 init_rwsem(&ag->lock);
100908- ag->id = atomic_inc_return(&autogroup_seq_nr);
100909+ ag->id = atomic_inc_return_unchecked(&autogroup_seq_nr);
100910 ag->tg = tg;
100911 #ifdef CONFIG_RT_GROUP_SCHED
100912 /*
100913diff --git a/kernel/sched/completion.c b/kernel/sched/completion.c
100914index 8d0f35d..c16360d 100644
100915--- a/kernel/sched/completion.c
100916+++ b/kernel/sched/completion.c
100917@@ -205,7 +205,7 @@ EXPORT_SYMBOL(wait_for_completion_interruptible);
100918 * Return: -ERESTARTSYS if interrupted, 0 if timed out, positive (at least 1,
100919 * or number of jiffies left till timeout) if completed.
100920 */
100921-long __sched
100922+long __sched __intentional_overflow(-1)
100923 wait_for_completion_interruptible_timeout(struct completion *x,
100924 unsigned long timeout)
100925 {
100926@@ -222,7 +222,7 @@ EXPORT_SYMBOL(wait_for_completion_interruptible_timeout);
100927 *
100928 * Return: -ERESTARTSYS if interrupted, 0 if completed.
100929 */
100930-int __sched wait_for_completion_killable(struct completion *x)
100931+int __sched __intentional_overflow(-1) wait_for_completion_killable(struct completion *x)
100932 {
100933 long t = wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_KILLABLE);
100934 if (t == -ERESTARTSYS)
100935@@ -243,7 +243,7 @@ EXPORT_SYMBOL(wait_for_completion_killable);
100936 * Return: -ERESTARTSYS if interrupted, 0 if timed out, positive (at least 1,
100937 * or number of jiffies left till timeout) if completed.
100938 */
100939-long __sched
100940+long __sched __intentional_overflow(-1)
100941 wait_for_completion_killable_timeout(struct completion *x,
100942 unsigned long timeout)
100943 {
100944diff --git a/kernel/sched/core.c b/kernel/sched/core.c
100945index 3d5f6f6..a94298f 100644
100946--- a/kernel/sched/core.c
100947+++ b/kernel/sched/core.c
100948@@ -1862,7 +1862,7 @@ void set_numabalancing_state(bool enabled)
100949 int sysctl_numa_balancing(struct ctl_table *table, int write,
100950 void __user *buffer, size_t *lenp, loff_t *ppos)
100951 {
100952- struct ctl_table t;
100953+ ctl_table_no_const t;
100954 int err;
100955 int state = numabalancing_enabled;
100956
100957@@ -2312,8 +2312,10 @@ context_switch(struct rq *rq, struct task_struct *prev,
100958 next->active_mm = oldmm;
100959 atomic_inc(&oldmm->mm_count);
100960 enter_lazy_tlb(oldmm, next);
100961- } else
100962+ } else {
100963 switch_mm(oldmm, mm, next);
100964+ populate_stack();
100965+ }
100966
100967 if (!prev->mm) {
100968 prev->active_mm = NULL;
100969@@ -3124,6 +3126,8 @@ int can_nice(const struct task_struct *p, const int nice)
100970 /* convert nice value [19,-20] to rlimit style value [1,40] */
100971 int nice_rlim = nice_to_rlimit(nice);
100972
100973+ gr_learn_resource(p, RLIMIT_NICE, nice_rlim, 1);
100974+
100975 return (nice_rlim <= task_rlimit(p, RLIMIT_NICE) ||
100976 capable(CAP_SYS_NICE));
100977 }
100978@@ -3150,7 +3154,8 @@ SYSCALL_DEFINE1(nice, int, increment)
100979 nice = task_nice(current) + increment;
100980
100981 nice = clamp_val(nice, MIN_NICE, MAX_NICE);
100982- if (increment < 0 && !can_nice(current, nice))
100983+ if (increment < 0 && (!can_nice(current, nice) ||
100984+ gr_handle_chroot_nice()))
100985 return -EPERM;
100986
100987 retval = security_task_setnice(current, nice);
100988@@ -3459,6 +3464,7 @@ recheck:
100989 if (policy != p->policy && !rlim_rtprio)
100990 return -EPERM;
100991
100992+ gr_learn_resource(p, RLIMIT_RTPRIO, attr->sched_priority, 1);
100993 /* can't increase priority */
100994 if (attr->sched_priority > p->rt_priority &&
100995 attr->sched_priority > rlim_rtprio)
100996@@ -4946,6 +4952,7 @@ void idle_task_exit(void)
100997
100998 if (mm != &init_mm) {
100999 switch_mm(mm, &init_mm, current);
101000+ populate_stack();
101001 finish_arch_post_lock_switch();
101002 }
101003 mmdrop(mm);
101004@@ -5041,7 +5048,7 @@ static void migrate_tasks(unsigned int dead_cpu)
101005
101006 #if defined(CONFIG_SCHED_DEBUG) && defined(CONFIG_SYSCTL)
101007
101008-static struct ctl_table sd_ctl_dir[] = {
101009+static ctl_table_no_const sd_ctl_dir[] __read_only = {
101010 {
101011 .procname = "sched_domain",
101012 .mode = 0555,
101013@@ -5058,17 +5065,17 @@ static struct ctl_table sd_ctl_root[] = {
101014 {}
101015 };
101016
101017-static struct ctl_table *sd_alloc_ctl_entry(int n)
101018+static ctl_table_no_const *sd_alloc_ctl_entry(int n)
101019 {
101020- struct ctl_table *entry =
101021+ ctl_table_no_const *entry =
101022 kcalloc(n, sizeof(struct ctl_table), GFP_KERNEL);
101023
101024 return entry;
101025 }
101026
101027-static void sd_free_ctl_entry(struct ctl_table **tablep)
101028+static void sd_free_ctl_entry(ctl_table_no_const *tablep)
101029 {
101030- struct ctl_table *entry;
101031+ ctl_table_no_const *entry;
101032
101033 /*
101034 * In the intermediate directories, both the child directory and
101035@@ -5076,22 +5083,25 @@ static void sd_free_ctl_entry(struct ctl_table **tablep)
101036 * will always be set. In the lowest directory the names are
101037 * static strings and all have proc handlers.
101038 */
101039- for (entry = *tablep; entry->mode; entry++) {
101040- if (entry->child)
101041- sd_free_ctl_entry(&entry->child);
101042+ for (entry = tablep; entry->mode; entry++) {
101043+ if (entry->child) {
101044+ sd_free_ctl_entry(entry->child);
101045+ pax_open_kernel();
101046+ entry->child = NULL;
101047+ pax_close_kernel();
101048+ }
101049 if (entry->proc_handler == NULL)
101050 kfree(entry->procname);
101051 }
101052
101053- kfree(*tablep);
101054- *tablep = NULL;
101055+ kfree(tablep);
101056 }
101057
101058 static int min_load_idx = 0;
101059 static int max_load_idx = CPU_LOAD_IDX_MAX-1;
101060
101061 static void
101062-set_table_entry(struct ctl_table *entry,
101063+set_table_entry(ctl_table_no_const *entry,
101064 const char *procname, void *data, int maxlen,
101065 umode_t mode, proc_handler *proc_handler,
101066 bool load_idx)
101067@@ -5111,7 +5121,7 @@ set_table_entry(struct ctl_table *entry,
101068 static struct ctl_table *
101069 sd_alloc_ctl_domain_table(struct sched_domain *sd)
101070 {
101071- struct ctl_table *table = sd_alloc_ctl_entry(14);
101072+ ctl_table_no_const *table = sd_alloc_ctl_entry(14);
101073
101074 if (table == NULL)
101075 return NULL;
101076@@ -5149,9 +5159,9 @@ sd_alloc_ctl_domain_table(struct sched_domain *sd)
101077 return table;
101078 }
101079
101080-static struct ctl_table *sd_alloc_ctl_cpu_table(int cpu)
101081+static ctl_table_no_const *sd_alloc_ctl_cpu_table(int cpu)
101082 {
101083- struct ctl_table *entry, *table;
101084+ ctl_table_no_const *entry, *table;
101085 struct sched_domain *sd;
101086 int domain_num = 0, i;
101087 char buf[32];
101088@@ -5178,11 +5188,13 @@ static struct ctl_table_header *sd_sysctl_header;
101089 static void register_sched_domain_sysctl(void)
101090 {
101091 int i, cpu_num = num_possible_cpus();
101092- struct ctl_table *entry = sd_alloc_ctl_entry(cpu_num + 1);
101093+ ctl_table_no_const *entry = sd_alloc_ctl_entry(cpu_num + 1);
101094 char buf[32];
101095
101096 WARN_ON(sd_ctl_dir[0].child);
101097+ pax_open_kernel();
101098 sd_ctl_dir[0].child = entry;
101099+ pax_close_kernel();
101100
101101 if (entry == NULL)
101102 return;
101103@@ -5205,8 +5217,12 @@ static void unregister_sched_domain_sysctl(void)
101104 if (sd_sysctl_header)
101105 unregister_sysctl_table(sd_sysctl_header);
101106 sd_sysctl_header = NULL;
101107- if (sd_ctl_dir[0].child)
101108- sd_free_ctl_entry(&sd_ctl_dir[0].child);
101109+ if (sd_ctl_dir[0].child) {
101110+ sd_free_ctl_entry(sd_ctl_dir[0].child);
101111+ pax_open_kernel();
101112+ sd_ctl_dir[0].child = NULL;
101113+ pax_close_kernel();
101114+ }
101115 }
101116 #else
101117 static void register_sched_domain_sysctl(void)
101118diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
101119index 241213b..6a64c91 100644
101120--- a/kernel/sched/fair.c
101121+++ b/kernel/sched/fair.c
101122@@ -2092,7 +2092,7 @@ void task_numa_fault(int last_cpupid, int mem_node, int pages, int flags)
101123
101124 static void reset_ptenuma_scan(struct task_struct *p)
101125 {
101126- ACCESS_ONCE(p->mm->numa_scan_seq)++;
101127+ ACCESS_ONCE_RW(p->mm->numa_scan_seq)++;
101128 p->mm->numa_scan_offset = 0;
101129 }
101130
101131@@ -7656,7 +7656,7 @@ static void nohz_idle_balance(struct rq *this_rq, enum cpu_idle_type idle) { }
101132 * run_rebalance_domains is triggered when needed from the scheduler tick.
101133 * Also triggered for nohz idle balancing (with nohz_balancing_kick set).
101134 */
101135-static void run_rebalance_domains(struct softirq_action *h)
101136+static __latent_entropy void run_rebalance_domains(void)
101137 {
101138 struct rq *this_rq = this_rq();
101139 enum cpu_idle_type idle = this_rq->idle_balance ?
101140diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
101141index dc0f435..ae2e085 100644
101142--- a/kernel/sched/sched.h
101143+++ b/kernel/sched/sched.h
101144@@ -1200,7 +1200,7 @@ struct sched_class {
101145 #ifdef CONFIG_FAIR_GROUP_SCHED
101146 void (*task_move_group) (struct task_struct *p, int on_rq);
101147 #endif
101148-};
101149+} __do_const;
101150
101151 static inline void put_prev_task(struct rq *rq, struct task_struct *prev)
101152 {
101153diff --git a/kernel/signal.c b/kernel/signal.c
101154index a390499..ebe9a21 100644
101155--- a/kernel/signal.c
101156+++ b/kernel/signal.c
101157@@ -53,12 +53,12 @@ static struct kmem_cache *sigqueue_cachep;
101158
101159 int print_fatal_signals __read_mostly;
101160
101161-static void __user *sig_handler(struct task_struct *t, int sig)
101162+static __sighandler_t sig_handler(struct task_struct *t, int sig)
101163 {
101164 return t->sighand->action[sig - 1].sa.sa_handler;
101165 }
101166
101167-static int sig_handler_ignored(void __user *handler, int sig)
101168+static int sig_handler_ignored(__sighandler_t handler, int sig)
101169 {
101170 /* Is it explicitly or implicitly ignored? */
101171 return handler == SIG_IGN ||
101172@@ -67,7 +67,7 @@ static int sig_handler_ignored(void __user *handler, int sig)
101173
101174 static int sig_task_ignored(struct task_struct *t, int sig, bool force)
101175 {
101176- void __user *handler;
101177+ __sighandler_t handler;
101178
101179 handler = sig_handler(t, sig);
101180
101181@@ -372,6 +372,9 @@ __sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimi
101182 atomic_inc(&user->sigpending);
101183 rcu_read_unlock();
101184
101185+ if (!override_rlimit)
101186+ gr_learn_resource(t, RLIMIT_SIGPENDING, atomic_read(&user->sigpending), 1);
101187+
101188 if (override_rlimit ||
101189 atomic_read(&user->sigpending) <=
101190 task_rlimit(t, RLIMIT_SIGPENDING)) {
101191@@ -499,7 +502,7 @@ flush_signal_handlers(struct task_struct *t, int force_default)
101192
101193 int unhandled_signal(struct task_struct *tsk, int sig)
101194 {
101195- void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
101196+ __sighandler_t handler = tsk->sighand->action[sig-1].sa.sa_handler;
101197 if (is_global_init(tsk))
101198 return 1;
101199 if (handler != SIG_IGN && handler != SIG_DFL)
101200@@ -793,6 +796,13 @@ static int check_kill_permission(int sig, struct siginfo *info,
101201 }
101202 }
101203
101204+ /* allow glibc communication via tgkill to other threads in our
101205+ thread group */
101206+ if ((info == SEND_SIG_NOINFO || info->si_code != SI_TKILL ||
101207+ sig != (SIGRTMIN+1) || task_tgid_vnr(t) != info->si_pid)
101208+ && gr_handle_signal(t, sig))
101209+ return -EPERM;
101210+
101211 return security_task_kill(t, info, sig, 0);
101212 }
101213
101214@@ -1176,7 +1186,7 @@ __group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
101215 return send_signal(sig, info, p, 1);
101216 }
101217
101218-static int
101219+int
101220 specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
101221 {
101222 return send_signal(sig, info, t, 0);
101223@@ -1213,6 +1223,7 @@ force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
101224 unsigned long int flags;
101225 int ret, blocked, ignored;
101226 struct k_sigaction *action;
101227+ int is_unhandled = 0;
101228
101229 spin_lock_irqsave(&t->sighand->siglock, flags);
101230 action = &t->sighand->action[sig-1];
101231@@ -1227,9 +1238,18 @@ force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
101232 }
101233 if (action->sa.sa_handler == SIG_DFL)
101234 t->signal->flags &= ~SIGNAL_UNKILLABLE;
101235+ if (action->sa.sa_handler == SIG_IGN || action->sa.sa_handler == SIG_DFL)
101236+ is_unhandled = 1;
101237 ret = specific_send_sig_info(sig, info, t);
101238 spin_unlock_irqrestore(&t->sighand->siglock, flags);
101239
101240+ /* only deal with unhandled signals, java etc trigger SIGSEGV during
101241+ normal operation */
101242+ if (is_unhandled) {
101243+ gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, t);
101244+ gr_handle_crash(t, sig);
101245+ }
101246+
101247 return ret;
101248 }
101249
101250@@ -1310,8 +1330,11 @@ int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
101251 ret = check_kill_permission(sig, info, p);
101252 rcu_read_unlock();
101253
101254- if (!ret && sig)
101255+ if (!ret && sig) {
101256 ret = do_send_sig_info(sig, info, p, true);
101257+ if (!ret)
101258+ gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, p);
101259+ }
101260
101261 return ret;
101262 }
101263@@ -2915,7 +2938,15 @@ do_send_specific(pid_t tgid, pid_t pid, int sig, struct siginfo *info)
101264 int error = -ESRCH;
101265
101266 rcu_read_lock();
101267- p = find_task_by_vpid(pid);
101268+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
101269+ /* allow glibc communication via tgkill to other threads in our
101270+ thread group */
101271+ if (grsec_enable_chroot_findtask && info->si_code == SI_TKILL &&
101272+ sig == (SIGRTMIN+1) && tgid == info->si_pid)
101273+ p = find_task_by_vpid_unrestricted(pid);
101274+ else
101275+#endif
101276+ p = find_task_by_vpid(pid);
101277 if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
101278 error = check_kill_permission(sig, info, p);
101279 /*
101280@@ -3248,8 +3279,8 @@ COMPAT_SYSCALL_DEFINE2(sigaltstack,
101281 }
101282 seg = get_fs();
101283 set_fs(KERNEL_DS);
101284- ret = do_sigaltstack((stack_t __force __user *) (uss_ptr ? &uss : NULL),
101285- (stack_t __force __user *) &uoss,
101286+ ret = do_sigaltstack((stack_t __force_user *) (uss_ptr ? &uss : NULL),
101287+ (stack_t __force_user *) &uoss,
101288 compat_user_stack_pointer());
101289 set_fs(seg);
101290 if (ret >= 0 && uoss_ptr) {
101291diff --git a/kernel/smpboot.c b/kernel/smpboot.c
101292index 40190f2..8861d40 100644
101293--- a/kernel/smpboot.c
101294+++ b/kernel/smpboot.c
101295@@ -290,7 +290,7 @@ int smpboot_register_percpu_thread(struct smp_hotplug_thread *plug_thread)
101296 }
101297 smpboot_unpark_thread(plug_thread, cpu);
101298 }
101299- list_add(&plug_thread->list, &hotplug_threads);
101300+ pax_list_add(&plug_thread->list, &hotplug_threads);
101301 out:
101302 mutex_unlock(&smpboot_threads_lock);
101303 put_online_cpus();
101304@@ -308,7 +308,7 @@ void smpboot_unregister_percpu_thread(struct smp_hotplug_thread *plug_thread)
101305 {
101306 get_online_cpus();
101307 mutex_lock(&smpboot_threads_lock);
101308- list_del(&plug_thread->list);
101309+ pax_list_del(&plug_thread->list);
101310 smpboot_destroy_threads(plug_thread);
101311 mutex_unlock(&smpboot_threads_lock);
101312 put_online_cpus();
101313diff --git a/kernel/softirq.c b/kernel/softirq.c
101314index 479e443..66d845e1 100644
101315--- a/kernel/softirq.c
101316+++ b/kernel/softirq.c
101317@@ -53,7 +53,7 @@ irq_cpustat_t irq_stat[NR_CPUS] ____cacheline_aligned;
101318 EXPORT_SYMBOL(irq_stat);
101319 #endif
101320
101321-static struct softirq_action softirq_vec[NR_SOFTIRQS] __cacheline_aligned_in_smp;
101322+static struct softirq_action softirq_vec[NR_SOFTIRQS] __read_only __aligned(PAGE_SIZE);
101323
101324 DEFINE_PER_CPU(struct task_struct *, ksoftirqd);
101325
101326@@ -270,7 +270,7 @@ restart:
101327 kstat_incr_softirqs_this_cpu(vec_nr);
101328
101329 trace_softirq_entry(vec_nr);
101330- h->action(h);
101331+ h->action();
101332 trace_softirq_exit(vec_nr);
101333 if (unlikely(prev_count != preempt_count())) {
101334 pr_err("huh, entered softirq %u %s %p with preempt_count %08x, exited with %08x?\n",
101335@@ -430,7 +430,7 @@ void __raise_softirq_irqoff(unsigned int nr)
101336 or_softirq_pending(1UL << nr);
101337 }
101338
101339-void open_softirq(int nr, void (*action)(struct softirq_action *))
101340+void __init open_softirq(int nr, void (*action)(void))
101341 {
101342 softirq_vec[nr].action = action;
101343 }
101344@@ -482,7 +482,7 @@ void __tasklet_hi_schedule_first(struct tasklet_struct *t)
101345 }
101346 EXPORT_SYMBOL(__tasklet_hi_schedule_first);
101347
101348-static void tasklet_action(struct softirq_action *a)
101349+static void tasklet_action(void)
101350 {
101351 struct tasklet_struct *list;
101352
101353@@ -518,7 +518,7 @@ static void tasklet_action(struct softirq_action *a)
101354 }
101355 }
101356
101357-static void tasklet_hi_action(struct softirq_action *a)
101358+static __latent_entropy void tasklet_hi_action(void)
101359 {
101360 struct tasklet_struct *list;
101361
101362@@ -744,7 +744,7 @@ static struct notifier_block cpu_nfb = {
101363 .notifier_call = cpu_callback
101364 };
101365
101366-static struct smp_hotplug_thread softirq_threads = {
101367+static struct smp_hotplug_thread softirq_threads __read_only = {
101368 .store = &ksoftirqd,
101369 .thread_should_run = ksoftirqd_should_run,
101370 .thread_fn = run_ksoftirqd,
101371diff --git a/kernel/sys.c b/kernel/sys.c
101372index a03d9cd..55dbe9c 100644
101373--- a/kernel/sys.c
101374+++ b/kernel/sys.c
101375@@ -160,6 +160,12 @@ static int set_one_prio(struct task_struct *p, int niceval, int error)
101376 error = -EACCES;
101377 goto out;
101378 }
101379+
101380+ if (gr_handle_chroot_setpriority(p, niceval)) {
101381+ error = -EACCES;
101382+ goto out;
101383+ }
101384+
101385 no_nice = security_task_setnice(p, niceval);
101386 if (no_nice) {
101387 error = no_nice;
101388@@ -365,6 +371,20 @@ SYSCALL_DEFINE2(setregid, gid_t, rgid, gid_t, egid)
101389 goto error;
101390 }
101391
101392+ if (gr_check_group_change(new->gid, new->egid, INVALID_GID))
101393+ goto error;
101394+
101395+ if (!gid_eq(new->gid, old->gid)) {
101396+ /* make sure we generate a learn log for what will
101397+ end up being a role transition after a full-learning
101398+ policy is generated
101399+ CAP_SETGID is required to perform a transition
101400+ we may not log a CAP_SETGID check above, e.g.
101401+ in the case where new rgid = old egid
101402+ */
101403+ gr_learn_cap(current, new, CAP_SETGID);
101404+ }
101405+
101406 if (rgid != (gid_t) -1 ||
101407 (egid != (gid_t) -1 && !gid_eq(kegid, old->gid)))
101408 new->sgid = new->egid;
101409@@ -400,6 +420,10 @@ SYSCALL_DEFINE1(setgid, gid_t, gid)
101410 old = current_cred();
101411
101412 retval = -EPERM;
101413+
101414+ if (gr_check_group_change(kgid, kgid, kgid))
101415+ goto error;
101416+
101417 if (ns_capable(old->user_ns, CAP_SETGID))
101418 new->gid = new->egid = new->sgid = new->fsgid = kgid;
101419 else if (gid_eq(kgid, old->gid) || gid_eq(kgid, old->sgid))
101420@@ -417,7 +441,7 @@ error:
101421 /*
101422 * change the user struct in a credentials set to match the new UID
101423 */
101424-static int set_user(struct cred *new)
101425+int set_user(struct cred *new)
101426 {
101427 struct user_struct *new_user;
101428
101429@@ -497,7 +521,18 @@ SYSCALL_DEFINE2(setreuid, uid_t, ruid, uid_t, euid)
101430 goto error;
101431 }
101432
101433+ if (gr_check_user_change(new->uid, new->euid, INVALID_UID))
101434+ goto error;
101435+
101436 if (!uid_eq(new->uid, old->uid)) {
101437+ /* make sure we generate a learn log for what will
101438+ end up being a role transition after a full-learning
101439+ policy is generated
101440+ CAP_SETUID is required to perform a transition
101441+ we may not log a CAP_SETUID check above, e.g.
101442+ in the case where new ruid = old euid
101443+ */
101444+ gr_learn_cap(current, new, CAP_SETUID);
101445 retval = set_user(new);
101446 if (retval < 0)
101447 goto error;
101448@@ -547,6 +582,12 @@ SYSCALL_DEFINE1(setuid, uid_t, uid)
101449 old = current_cred();
101450
101451 retval = -EPERM;
101452+
101453+ if (gr_check_crash_uid(kuid))
101454+ goto error;
101455+ if (gr_check_user_change(kuid, kuid, kuid))
101456+ goto error;
101457+
101458 if (ns_capable(old->user_ns, CAP_SETUID)) {
101459 new->suid = new->uid = kuid;
101460 if (!uid_eq(kuid, old->uid)) {
101461@@ -616,6 +657,9 @@ SYSCALL_DEFINE3(setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
101462 goto error;
101463 }
101464
101465+ if (gr_check_user_change(kruid, keuid, INVALID_UID))
101466+ goto error;
101467+
101468 if (ruid != (uid_t) -1) {
101469 new->uid = kruid;
101470 if (!uid_eq(kruid, old->uid)) {
101471@@ -700,6 +744,9 @@ SYSCALL_DEFINE3(setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
101472 goto error;
101473 }
101474
101475+ if (gr_check_group_change(krgid, kegid, INVALID_GID))
101476+ goto error;
101477+
101478 if (rgid != (gid_t) -1)
101479 new->gid = krgid;
101480 if (egid != (gid_t) -1)
101481@@ -764,12 +811,16 @@ SYSCALL_DEFINE1(setfsuid, uid_t, uid)
101482 uid_eq(kuid, old->suid) || uid_eq(kuid, old->fsuid) ||
101483 ns_capable(old->user_ns, CAP_SETUID)) {
101484 if (!uid_eq(kuid, old->fsuid)) {
101485+ if (gr_check_user_change(INVALID_UID, INVALID_UID, kuid))
101486+ goto error;
101487+
101488 new->fsuid = kuid;
101489 if (security_task_fix_setuid(new, old, LSM_SETID_FS) == 0)
101490 goto change_okay;
101491 }
101492 }
101493
101494+error:
101495 abort_creds(new);
101496 return old_fsuid;
101497
101498@@ -802,12 +853,16 @@ SYSCALL_DEFINE1(setfsgid, gid_t, gid)
101499 if (gid_eq(kgid, old->gid) || gid_eq(kgid, old->egid) ||
101500 gid_eq(kgid, old->sgid) || gid_eq(kgid, old->fsgid) ||
101501 ns_capable(old->user_ns, CAP_SETGID)) {
101502+ if (gr_check_group_change(INVALID_GID, INVALID_GID, kgid))
101503+ goto error;
101504+
101505 if (!gid_eq(kgid, old->fsgid)) {
101506 new->fsgid = kgid;
101507 goto change_okay;
101508 }
101509 }
101510
101511+error:
101512 abort_creds(new);
101513 return old_fsgid;
101514
101515@@ -1185,19 +1240,19 @@ SYSCALL_DEFINE1(olduname, struct oldold_utsname __user *, name)
101516 return -EFAULT;
101517
101518 down_read(&uts_sem);
101519- error = __copy_to_user(&name->sysname, &utsname()->sysname,
101520+ error = __copy_to_user(name->sysname, &utsname()->sysname,
101521 __OLD_UTS_LEN);
101522 error |= __put_user(0, name->sysname + __OLD_UTS_LEN);
101523- error |= __copy_to_user(&name->nodename, &utsname()->nodename,
101524+ error |= __copy_to_user(name->nodename, &utsname()->nodename,
101525 __OLD_UTS_LEN);
101526 error |= __put_user(0, name->nodename + __OLD_UTS_LEN);
101527- error |= __copy_to_user(&name->release, &utsname()->release,
101528+ error |= __copy_to_user(name->release, &utsname()->release,
101529 __OLD_UTS_LEN);
101530 error |= __put_user(0, name->release + __OLD_UTS_LEN);
101531- error |= __copy_to_user(&name->version, &utsname()->version,
101532+ error |= __copy_to_user(name->version, &utsname()->version,
101533 __OLD_UTS_LEN);
101534 error |= __put_user(0, name->version + __OLD_UTS_LEN);
101535- error |= __copy_to_user(&name->machine, &utsname()->machine,
101536+ error |= __copy_to_user(name->machine, &utsname()->machine,
101537 __OLD_UTS_LEN);
101538 error |= __put_user(0, name->machine + __OLD_UTS_LEN);
101539 up_read(&uts_sem);
101540@@ -1398,6 +1453,13 @@ int do_prlimit(struct task_struct *tsk, unsigned int resource,
101541 */
101542 new_rlim->rlim_cur = 1;
101543 }
101544+ /* Handle the case where a fork and setuid occur and then RLIMIT_NPROC
101545+ is changed to a lower value. Since tasks can be created by the same
101546+ user in between this limit change and an execve by this task, force
101547+ a recheck only for this task by setting PF_NPROC_EXCEEDED
101548+ */
101549+ if (resource == RLIMIT_NPROC && tsk->real_cred->user != INIT_USER)
101550+ tsk->flags |= PF_NPROC_EXCEEDED;
101551 }
101552 if (!retval) {
101553 if (old_rlim)
101554diff --git a/kernel/sysctl.c b/kernel/sysctl.c
101555index ce410bb..cd276f0 100644
101556--- a/kernel/sysctl.c
101557+++ b/kernel/sysctl.c
101558@@ -94,7 +94,6 @@
101559
101560
101561 #if defined(CONFIG_SYSCTL)
101562-
101563 /* External variables not in a header file. */
101564 extern int max_threads;
101565 extern int suid_dumpable;
101566@@ -115,19 +114,20 @@ extern int sysctl_nr_trim_pages;
101567
101568 /* Constants used for minimum and maximum */
101569 #ifdef CONFIG_LOCKUP_DETECTOR
101570-static int sixty = 60;
101571+static int sixty __read_only = 60;
101572 #endif
101573
101574-static int __maybe_unused neg_one = -1;
101575+static int __maybe_unused neg_one __read_only = -1;
101576
101577-static int zero;
101578-static int __maybe_unused one = 1;
101579-static int __maybe_unused two = 2;
101580-static int __maybe_unused four = 4;
101581-static unsigned long one_ul = 1;
101582-static int one_hundred = 100;
101583+static int zero __read_only = 0;
101584+static int __maybe_unused one __read_only = 1;
101585+static int __maybe_unused two __read_only = 2;
101586+static int __maybe_unused three __read_only = 3;
101587+static int __maybe_unused four __read_only = 4;
101588+static unsigned long one_ul __read_only = 1;
101589+static int one_hundred __read_only = 100;
101590 #ifdef CONFIG_PRINTK
101591-static int ten_thousand = 10000;
101592+static int ten_thousand __read_only = 10000;
101593 #endif
101594
101595 /* this is needed for the proc_doulongvec_minmax of vm_dirty_bytes */
101596@@ -181,10 +181,8 @@ static int proc_taint(struct ctl_table *table, int write,
101597 void __user *buffer, size_t *lenp, loff_t *ppos);
101598 #endif
101599
101600-#ifdef CONFIG_PRINTK
101601 static int proc_dointvec_minmax_sysadmin(struct ctl_table *table, int write,
101602 void __user *buffer, size_t *lenp, loff_t *ppos);
101603-#endif
101604
101605 static int proc_dointvec_minmax_coredump(struct ctl_table *table, int write,
101606 void __user *buffer, size_t *lenp, loff_t *ppos);
101607@@ -215,6 +213,8 @@ static int sysrq_sysctl_handler(struct ctl_table *table, int write,
101608
101609 #endif
101610
101611+extern struct ctl_table grsecurity_table[];
101612+
101613 static struct ctl_table kern_table[];
101614 static struct ctl_table vm_table[];
101615 static struct ctl_table fs_table[];
101616@@ -229,6 +229,20 @@ extern struct ctl_table epoll_table[];
101617 int sysctl_legacy_va_layout;
101618 #endif
101619
101620+#ifdef CONFIG_PAX_SOFTMODE
101621+static struct ctl_table pax_table[] = {
101622+ {
101623+ .procname = "softmode",
101624+ .data = &pax_softmode,
101625+ .maxlen = sizeof(unsigned int),
101626+ .mode = 0600,
101627+ .proc_handler = &proc_dointvec,
101628+ },
101629+
101630+ { }
101631+};
101632+#endif
101633+
101634 /* The default sysctl tables: */
101635
101636 static struct ctl_table sysctl_base_table[] = {
101637@@ -277,6 +291,22 @@ static int max_extfrag_threshold = 1000;
101638 #endif
101639
101640 static struct ctl_table kern_table[] = {
101641+#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS)
101642+ {
101643+ .procname = "grsecurity",
101644+ .mode = 0500,
101645+ .child = grsecurity_table,
101646+ },
101647+#endif
101648+
101649+#ifdef CONFIG_PAX_SOFTMODE
101650+ {
101651+ .procname = "pax",
101652+ .mode = 0500,
101653+ .child = pax_table,
101654+ },
101655+#endif
101656+
101657 {
101658 .procname = "sched_child_runs_first",
101659 .data = &sysctl_sched_child_runs_first,
101660@@ -649,7 +679,7 @@ static struct ctl_table kern_table[] = {
101661 .data = &modprobe_path,
101662 .maxlen = KMOD_PATH_LEN,
101663 .mode = 0644,
101664- .proc_handler = proc_dostring,
101665+ .proc_handler = proc_dostring_modpriv,
101666 },
101667 {
101668 .procname = "modules_disabled",
101669@@ -816,16 +846,20 @@ static struct ctl_table kern_table[] = {
101670 .extra1 = &zero,
101671 .extra2 = &one,
101672 },
101673+#endif
101674 {
101675 .procname = "kptr_restrict",
101676 .data = &kptr_restrict,
101677 .maxlen = sizeof(int),
101678 .mode = 0644,
101679 .proc_handler = proc_dointvec_minmax_sysadmin,
101680+#ifdef CONFIG_GRKERNSEC_HIDESYM
101681+ .extra1 = &two,
101682+#else
101683 .extra1 = &zero,
101684+#endif
101685 .extra2 = &two,
101686 },
101687-#endif
101688 {
101689 .procname = "ngroups_max",
101690 .data = &ngroups_max,
101691@@ -1072,10 +1106,17 @@ static struct ctl_table kern_table[] = {
101692 */
101693 {
101694 .procname = "perf_event_paranoid",
101695- .data = &sysctl_perf_event_paranoid,
101696- .maxlen = sizeof(sysctl_perf_event_paranoid),
101697+ .data = &sysctl_perf_event_legitimately_concerned,
101698+ .maxlen = sizeof(sysctl_perf_event_legitimately_concerned),
101699 .mode = 0644,
101700- .proc_handler = proc_dointvec,
101701+ /* go ahead, be a hero */
101702+ .proc_handler = proc_dointvec_minmax_sysadmin,
101703+ .extra1 = &neg_one,
101704+#ifdef CONFIG_GRKERNSEC_PERF_HARDEN
101705+ .extra2 = &three,
101706+#else
101707+ .extra2 = &two,
101708+#endif
101709 },
101710 {
101711 .procname = "perf_event_mlock_kb",
101712@@ -1348,6 +1389,13 @@ static struct ctl_table vm_table[] = {
101713 .proc_handler = proc_dointvec_minmax,
101714 .extra1 = &zero,
101715 },
101716+ {
101717+ .procname = "heap_stack_gap",
101718+ .data = &sysctl_heap_stack_gap,
101719+ .maxlen = sizeof(sysctl_heap_stack_gap),
101720+ .mode = 0644,
101721+ .proc_handler = proc_doulongvec_minmax,
101722+ },
101723 #else
101724 {
101725 .procname = "nr_trim_pages",
101726@@ -1830,6 +1878,16 @@ int proc_dostring(struct ctl_table *table, int write,
101727 (char __user *)buffer, lenp, ppos);
101728 }
101729
101730+int proc_dostring_modpriv(struct ctl_table *table, int write,
101731+ void __user *buffer, size_t *lenp, loff_t *ppos)
101732+{
101733+ if (write && !capable(CAP_SYS_MODULE))
101734+ return -EPERM;
101735+
101736+ return _proc_do_string(table->data, table->maxlen, write,
101737+ buffer, lenp, ppos);
101738+}
101739+
101740 static size_t proc_skip_spaces(char **buf)
101741 {
101742 size_t ret;
101743@@ -1935,6 +1993,8 @@ static int proc_put_long(void __user **buf, size_t *size, unsigned long val,
101744 len = strlen(tmp);
101745 if (len > *size)
101746 len = *size;
101747+ if (len > sizeof(tmp))
101748+ len = sizeof(tmp);
101749 if (copy_to_user(*buf, tmp, len))
101750 return -EFAULT;
101751 *size -= len;
101752@@ -2112,7 +2172,7 @@ int proc_dointvec(struct ctl_table *table, int write,
101753 static int proc_taint(struct ctl_table *table, int write,
101754 void __user *buffer, size_t *lenp, loff_t *ppos)
101755 {
101756- struct ctl_table t;
101757+ ctl_table_no_const t;
101758 unsigned long tmptaint = get_taint();
101759 int err;
101760
101761@@ -2140,7 +2200,6 @@ static int proc_taint(struct ctl_table *table, int write,
101762 return err;
101763 }
101764
101765-#ifdef CONFIG_PRINTK
101766 static int proc_dointvec_minmax_sysadmin(struct ctl_table *table, int write,
101767 void __user *buffer, size_t *lenp, loff_t *ppos)
101768 {
101769@@ -2149,7 +2208,6 @@ static int proc_dointvec_minmax_sysadmin(struct ctl_table *table, int write,
101770
101771 return proc_dointvec_minmax(table, write, buffer, lenp, ppos);
101772 }
101773-#endif
101774
101775 struct do_proc_dointvec_minmax_conv_param {
101776 int *min;
101777@@ -2709,6 +2767,12 @@ int proc_dostring(struct ctl_table *table, int write,
101778 return -ENOSYS;
101779 }
101780
101781+int proc_dostring_modpriv(struct ctl_table *table, int write,
101782+ void __user *buffer, size_t *lenp, loff_t *ppos)
101783+{
101784+ return -ENOSYS;
101785+}
101786+
101787 int proc_dointvec(struct ctl_table *table, int write,
101788 void __user *buffer, size_t *lenp, loff_t *ppos)
101789 {
101790@@ -2765,5 +2829,6 @@ EXPORT_SYMBOL(proc_dointvec_minmax);
101791 EXPORT_SYMBOL(proc_dointvec_userhz_jiffies);
101792 EXPORT_SYMBOL(proc_dointvec_ms_jiffies);
101793 EXPORT_SYMBOL(proc_dostring);
101794+EXPORT_SYMBOL(proc_dostring_modpriv);
101795 EXPORT_SYMBOL(proc_doulongvec_minmax);
101796 EXPORT_SYMBOL(proc_doulongvec_ms_jiffies_minmax);
101797diff --git a/kernel/taskstats.c b/kernel/taskstats.c
101798index 21f82c2..c1984e5 100644
101799--- a/kernel/taskstats.c
101800+++ b/kernel/taskstats.c
101801@@ -28,9 +28,12 @@
101802 #include <linux/fs.h>
101803 #include <linux/file.h>
101804 #include <linux/pid_namespace.h>
101805+#include <linux/grsecurity.h>
101806 #include <net/genetlink.h>
101807 #include <linux/atomic.h>
101808
101809+extern int gr_is_taskstats_denied(int pid);
101810+
101811 /*
101812 * Maximum length of a cpumask that can be specified in
101813 * the TASKSTATS_CMD_ATTR_REGISTER/DEREGISTER_CPUMASK attribute
101814@@ -567,6 +570,9 @@ err:
101815
101816 static int taskstats_user_cmd(struct sk_buff *skb, struct genl_info *info)
101817 {
101818+ if (gr_is_taskstats_denied(current->pid))
101819+ return -EACCES;
101820+
101821 if (info->attrs[TASKSTATS_CMD_ATTR_REGISTER_CPUMASK])
101822 return cmd_attr_register_cpumask(info);
101823 else if (info->attrs[TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK])
101824diff --git a/kernel/time/alarmtimer.c b/kernel/time/alarmtimer.c
101825index 1b001ed..55ef9e4 100644
101826--- a/kernel/time/alarmtimer.c
101827+++ b/kernel/time/alarmtimer.c
101828@@ -823,7 +823,7 @@ static int __init alarmtimer_init(void)
101829 struct platform_device *pdev;
101830 int error = 0;
101831 int i;
101832- struct k_clock alarm_clock = {
101833+ static struct k_clock alarm_clock = {
101834 .clock_getres = alarm_clock_getres,
101835 .clock_get = alarm_clock_get,
101836 .timer_create = alarm_timer_create,
101837diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c
101838index bee0c1f..a23fe2d 100644
101839--- a/kernel/time/hrtimer.c
101840+++ b/kernel/time/hrtimer.c
101841@@ -1391,7 +1391,7 @@ void hrtimer_peek_ahead_timers(void)
101842 local_irq_restore(flags);
101843 }
101844
101845-static void run_hrtimer_softirq(struct softirq_action *h)
101846+static __latent_entropy void run_hrtimer_softirq(void)
101847 {
101848 hrtimer_peek_ahead_timers();
101849 }
101850diff --git a/kernel/time/posix-cpu-timers.c b/kernel/time/posix-cpu-timers.c
101851index 0075da7..63cc872 100644
101852--- a/kernel/time/posix-cpu-timers.c
101853+++ b/kernel/time/posix-cpu-timers.c
101854@@ -1449,14 +1449,14 @@ struct k_clock clock_posix_cpu = {
101855
101856 static __init int init_posix_cpu_timers(void)
101857 {
101858- struct k_clock process = {
101859+ static struct k_clock process = {
101860 .clock_getres = process_cpu_clock_getres,
101861 .clock_get = process_cpu_clock_get,
101862 .timer_create = process_cpu_timer_create,
101863 .nsleep = process_cpu_nsleep,
101864 .nsleep_restart = process_cpu_nsleep_restart,
101865 };
101866- struct k_clock thread = {
101867+ static struct k_clock thread = {
101868 .clock_getres = thread_cpu_clock_getres,
101869 .clock_get = thread_cpu_clock_get,
101870 .timer_create = thread_cpu_timer_create,
101871diff --git a/kernel/time/posix-timers.c b/kernel/time/posix-timers.c
101872index 31ea01f..7fc61ef 100644
101873--- a/kernel/time/posix-timers.c
101874+++ b/kernel/time/posix-timers.c
101875@@ -43,6 +43,7 @@
101876 #include <linux/hash.h>
101877 #include <linux/posix-clock.h>
101878 #include <linux/posix-timers.h>
101879+#include <linux/grsecurity.h>
101880 #include <linux/syscalls.h>
101881 #include <linux/wait.h>
101882 #include <linux/workqueue.h>
101883@@ -124,7 +125,7 @@ static DEFINE_SPINLOCK(hash_lock);
101884 * which we beg off on and pass to do_sys_settimeofday().
101885 */
101886
101887-static struct k_clock posix_clocks[MAX_CLOCKS];
101888+static struct k_clock *posix_clocks[MAX_CLOCKS];
101889
101890 /*
101891 * These ones are defined below.
101892@@ -277,7 +278,7 @@ static int posix_get_tai(clockid_t which_clock, struct timespec *tp)
101893 */
101894 static __init int init_posix_timers(void)
101895 {
101896- struct k_clock clock_realtime = {
101897+ static struct k_clock clock_realtime = {
101898 .clock_getres = hrtimer_get_res,
101899 .clock_get = posix_clock_realtime_get,
101900 .clock_set = posix_clock_realtime_set,
101901@@ -289,7 +290,7 @@ static __init int init_posix_timers(void)
101902 .timer_get = common_timer_get,
101903 .timer_del = common_timer_del,
101904 };
101905- struct k_clock clock_monotonic = {
101906+ static struct k_clock clock_monotonic = {
101907 .clock_getres = hrtimer_get_res,
101908 .clock_get = posix_ktime_get_ts,
101909 .nsleep = common_nsleep,
101910@@ -299,19 +300,19 @@ static __init int init_posix_timers(void)
101911 .timer_get = common_timer_get,
101912 .timer_del = common_timer_del,
101913 };
101914- struct k_clock clock_monotonic_raw = {
101915+ static struct k_clock clock_monotonic_raw = {
101916 .clock_getres = hrtimer_get_res,
101917 .clock_get = posix_get_monotonic_raw,
101918 };
101919- struct k_clock clock_realtime_coarse = {
101920+ static struct k_clock clock_realtime_coarse = {
101921 .clock_getres = posix_get_coarse_res,
101922 .clock_get = posix_get_realtime_coarse,
101923 };
101924- struct k_clock clock_monotonic_coarse = {
101925+ static struct k_clock clock_monotonic_coarse = {
101926 .clock_getres = posix_get_coarse_res,
101927 .clock_get = posix_get_monotonic_coarse,
101928 };
101929- struct k_clock clock_tai = {
101930+ static struct k_clock clock_tai = {
101931 .clock_getres = hrtimer_get_res,
101932 .clock_get = posix_get_tai,
101933 .nsleep = common_nsleep,
101934@@ -321,7 +322,7 @@ static __init int init_posix_timers(void)
101935 .timer_get = common_timer_get,
101936 .timer_del = common_timer_del,
101937 };
101938- struct k_clock clock_boottime = {
101939+ static struct k_clock clock_boottime = {
101940 .clock_getres = hrtimer_get_res,
101941 .clock_get = posix_get_boottime,
101942 .nsleep = common_nsleep,
101943@@ -533,7 +534,7 @@ void posix_timers_register_clock(const clockid_t clock_id,
101944 return;
101945 }
101946
101947- posix_clocks[clock_id] = *new_clock;
101948+ posix_clocks[clock_id] = new_clock;
101949 }
101950 EXPORT_SYMBOL_GPL(posix_timers_register_clock);
101951
101952@@ -579,9 +580,9 @@ static struct k_clock *clockid_to_kclock(const clockid_t id)
101953 return (id & CLOCKFD_MASK) == CLOCKFD ?
101954 &clock_posix_dynamic : &clock_posix_cpu;
101955
101956- if (id >= MAX_CLOCKS || !posix_clocks[id].clock_getres)
101957+ if (id >= MAX_CLOCKS || !posix_clocks[id] || !posix_clocks[id]->clock_getres)
101958 return NULL;
101959- return &posix_clocks[id];
101960+ return posix_clocks[id];
101961 }
101962
101963 static int common_timer_create(struct k_itimer *new_timer)
101964@@ -599,7 +600,7 @@ SYSCALL_DEFINE3(timer_create, const clockid_t, which_clock,
101965 struct k_clock *kc = clockid_to_kclock(which_clock);
101966 struct k_itimer *new_timer;
101967 int error, new_timer_id;
101968- sigevent_t event;
101969+ sigevent_t event = { };
101970 int it_id_set = IT_ID_NOT_SET;
101971
101972 if (!kc)
101973@@ -1014,6 +1015,13 @@ SYSCALL_DEFINE2(clock_settime, const clockid_t, which_clock,
101974 if (copy_from_user(&new_tp, tp, sizeof (*tp)))
101975 return -EFAULT;
101976
101977+ /* only the CLOCK_REALTIME clock can be set, all other clocks
101978+ have their clock_set fptr set to a nosettime dummy function
101979+ CLOCK_REALTIME has a NULL clock_set fptr which causes it to
101980+ call common_clock_set, which calls do_sys_settimeofday, which
101981+ we hook
101982+ */
101983+
101984 return kc->clock_set(which_clock, &new_tp);
101985 }
101986
101987diff --git a/kernel/time/time.c b/kernel/time/time.c
101988index 2c85b77..6530536 100644
101989--- a/kernel/time/time.c
101990+++ b/kernel/time/time.c
101991@@ -173,6 +173,11 @@ int do_sys_settimeofday(const struct timespec *tv, const struct timezone *tz)
101992 return error;
101993
101994 if (tz) {
101995+ /* we log in do_settimeofday called below, so don't log twice
101996+ */
101997+ if (!tv)
101998+ gr_log_timechange();
101999+
102000 sys_tz = *tz;
102001 update_vsyscall_tz();
102002 if (firsttime) {
102003diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
102004index 91db941..a371671 100644
102005--- a/kernel/time/timekeeping.c
102006+++ b/kernel/time/timekeeping.c
102007@@ -15,6 +15,7 @@
102008 #include <linux/init.h>
102009 #include <linux/mm.h>
102010 #include <linux/sched.h>
102011+#include <linux/grsecurity.h>
102012 #include <linux/syscore_ops.h>
102013 #include <linux/clocksource.h>
102014 #include <linux/jiffies.h>
102015@@ -802,6 +803,8 @@ int do_settimeofday64(const struct timespec64 *ts)
102016 if (!timespec64_valid_strict(ts))
102017 return -EINVAL;
102018
102019+ gr_log_timechange();
102020+
102021 raw_spin_lock_irqsave(&timekeeper_lock, flags);
102022 write_seqcount_begin(&tk_core.seq);
102023
102024diff --git a/kernel/time/timer.c b/kernel/time/timer.c
102025index 2d3f5c5..7ed7dc5 100644
102026--- a/kernel/time/timer.c
102027+++ b/kernel/time/timer.c
102028@@ -1393,7 +1393,7 @@ void update_process_times(int user_tick)
102029 /*
102030 * This function runs timers and the timer-tq in bottom half context.
102031 */
102032-static void run_timer_softirq(struct softirq_action *h)
102033+static __latent_entropy void run_timer_softirq(void)
102034 {
102035 struct tvec_base *base = __this_cpu_read(tvec_bases);
102036
102037@@ -1456,7 +1456,7 @@ static void process_timeout(unsigned long __data)
102038 *
102039 * In all cases the return value is guaranteed to be non-negative.
102040 */
102041-signed long __sched schedule_timeout(signed long timeout)
102042+signed long __sched __intentional_overflow(-1) schedule_timeout(signed long timeout)
102043 {
102044 struct timer_list timer;
102045 unsigned long expire;
102046diff --git a/kernel/time/timer_list.c b/kernel/time/timer_list.c
102047index 61ed862..3b52c65 100644
102048--- a/kernel/time/timer_list.c
102049+++ b/kernel/time/timer_list.c
102050@@ -45,12 +45,16 @@ DECLARE_PER_CPU(struct hrtimer_cpu_base, hrtimer_bases);
102051
102052 static void print_name_offset(struct seq_file *m, void *sym)
102053 {
102054+#ifdef CONFIG_GRKERNSEC_HIDESYM
102055+ SEQ_printf(m, "<%p>", NULL);
102056+#else
102057 char symname[KSYM_NAME_LEN];
102058
102059 if (lookup_symbol_name((unsigned long)sym, symname) < 0)
102060 SEQ_printf(m, "<%pK>", sym);
102061 else
102062 SEQ_printf(m, "%s", symname);
102063+#endif
102064 }
102065
102066 static void
102067@@ -119,7 +123,11 @@ next_one:
102068 static void
102069 print_base(struct seq_file *m, struct hrtimer_clock_base *base, u64 now)
102070 {
102071+#ifdef CONFIG_GRKERNSEC_HIDESYM
102072+ SEQ_printf(m, " .base: %p\n", NULL);
102073+#else
102074 SEQ_printf(m, " .base: %pK\n", base);
102075+#endif
102076 SEQ_printf(m, " .index: %d\n",
102077 base->index);
102078 SEQ_printf(m, " .resolution: %Lu nsecs\n",
102079@@ -362,7 +370,11 @@ static int __init init_timer_list_procfs(void)
102080 {
102081 struct proc_dir_entry *pe;
102082
102083+#ifdef CONFIG_GRKERNSEC_PROC_ADD
102084+ pe = proc_create("timer_list", 0400, NULL, &timer_list_fops);
102085+#else
102086 pe = proc_create("timer_list", 0444, NULL, &timer_list_fops);
102087+#endif
102088 if (!pe)
102089 return -ENOMEM;
102090 return 0;
102091diff --git a/kernel/time/timer_stats.c b/kernel/time/timer_stats.c
102092index 1fb08f2..ca4bb1e 100644
102093--- a/kernel/time/timer_stats.c
102094+++ b/kernel/time/timer_stats.c
102095@@ -116,7 +116,7 @@ static ktime_t time_start, time_stop;
102096 static unsigned long nr_entries;
102097 static struct entry entries[MAX_ENTRIES];
102098
102099-static atomic_t overflow_count;
102100+static atomic_unchecked_t overflow_count;
102101
102102 /*
102103 * The entries are in a hash-table, for fast lookup:
102104@@ -140,7 +140,7 @@ static void reset_entries(void)
102105 nr_entries = 0;
102106 memset(entries, 0, sizeof(entries));
102107 memset(tstat_hash_table, 0, sizeof(tstat_hash_table));
102108- atomic_set(&overflow_count, 0);
102109+ atomic_set_unchecked(&overflow_count, 0);
102110 }
102111
102112 static struct entry *alloc_entry(void)
102113@@ -261,7 +261,7 @@ void timer_stats_update_stats(void *timer, pid_t pid, void *startf,
102114 if (likely(entry))
102115 entry->count++;
102116 else
102117- atomic_inc(&overflow_count);
102118+ atomic_inc_unchecked(&overflow_count);
102119
102120 out_unlock:
102121 raw_spin_unlock_irqrestore(lock, flags);
102122@@ -269,12 +269,16 @@ void timer_stats_update_stats(void *timer, pid_t pid, void *startf,
102123
102124 static void print_name_offset(struct seq_file *m, unsigned long addr)
102125 {
102126+#ifdef CONFIG_GRKERNSEC_HIDESYM
102127+ seq_printf(m, "<%p>", NULL);
102128+#else
102129 char symname[KSYM_NAME_LEN];
102130
102131 if (lookup_symbol_name(addr, symname) < 0)
102132- seq_printf(m, "<%p>", (void *)addr);
102133+ seq_printf(m, "<%pK>", (void *)addr);
102134 else
102135 seq_printf(m, "%s", symname);
102136+#endif
102137 }
102138
102139 static int tstats_show(struct seq_file *m, void *v)
102140@@ -300,8 +304,8 @@ static int tstats_show(struct seq_file *m, void *v)
102141
102142 seq_puts(m, "Timer Stats Version: v0.3\n");
102143 seq_printf(m, "Sample period: %ld.%03ld s\n", period.tv_sec, ms);
102144- if (atomic_read(&overflow_count))
102145- seq_printf(m, "Overflow: %d entries\n", atomic_read(&overflow_count));
102146+ if (atomic_read_unchecked(&overflow_count))
102147+ seq_printf(m, "Overflow: %d entries\n", atomic_read_unchecked(&overflow_count));
102148 seq_printf(m, "Collection: %s\n", timer_stats_active ? "active" : "inactive");
102149
102150 for (i = 0; i < nr_entries; i++) {
102151@@ -417,7 +421,11 @@ static int __init init_tstats_procfs(void)
102152 {
102153 struct proc_dir_entry *pe;
102154
102155+#ifdef CONFIG_GRKERNSEC_PROC_ADD
102156+ pe = proc_create("timer_stats", 0600, NULL, &tstats_fops);
102157+#else
102158 pe = proc_create("timer_stats", 0644, NULL, &tstats_fops);
102159+#endif
102160 if (!pe)
102161 return -ENOMEM;
102162 return 0;
102163diff --git a/kernel/torture.c b/kernel/torture.c
102164index dd70993..0bf694b 100644
102165--- a/kernel/torture.c
102166+++ b/kernel/torture.c
102167@@ -482,7 +482,7 @@ static int torture_shutdown_notify(struct notifier_block *unused1,
102168 mutex_lock(&fullstop_mutex);
102169 if (ACCESS_ONCE(fullstop) == FULLSTOP_DONTSTOP) {
102170 VERBOSE_TOROUT_STRING("Unscheduled system shutdown detected");
102171- ACCESS_ONCE(fullstop) = FULLSTOP_SHUTDOWN;
102172+ ACCESS_ONCE_RW(fullstop) = FULLSTOP_SHUTDOWN;
102173 } else {
102174 pr_warn("Concurrent rmmod and shutdown illegal!\n");
102175 }
102176@@ -549,14 +549,14 @@ static int torture_stutter(void *arg)
102177 if (!torture_must_stop()) {
102178 if (stutter > 1) {
102179 schedule_timeout_interruptible(stutter - 1);
102180- ACCESS_ONCE(stutter_pause_test) = 2;
102181+ ACCESS_ONCE_RW(stutter_pause_test) = 2;
102182 }
102183 schedule_timeout_interruptible(1);
102184- ACCESS_ONCE(stutter_pause_test) = 1;
102185+ ACCESS_ONCE_RW(stutter_pause_test) = 1;
102186 }
102187 if (!torture_must_stop())
102188 schedule_timeout_interruptible(stutter);
102189- ACCESS_ONCE(stutter_pause_test) = 0;
102190+ ACCESS_ONCE_RW(stutter_pause_test) = 0;
102191 torture_shutdown_absorb("torture_stutter");
102192 } while (!torture_must_stop());
102193 torture_kthread_stopping("torture_stutter");
102194@@ -648,7 +648,7 @@ bool torture_cleanup_begin(void)
102195 schedule_timeout_uninterruptible(10);
102196 return true;
102197 }
102198- ACCESS_ONCE(fullstop) = FULLSTOP_RMMOD;
102199+ ACCESS_ONCE_RW(fullstop) = FULLSTOP_RMMOD;
102200 mutex_unlock(&fullstop_mutex);
102201 torture_shutdown_cleanup();
102202 torture_shuffle_cleanup();
102203diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
102204index 483cecf..ac46091 100644
102205--- a/kernel/trace/blktrace.c
102206+++ b/kernel/trace/blktrace.c
102207@@ -328,7 +328,7 @@ static ssize_t blk_dropped_read(struct file *filp, char __user *buffer,
102208 struct blk_trace *bt = filp->private_data;
102209 char buf[16];
102210
102211- snprintf(buf, sizeof(buf), "%u\n", atomic_read(&bt->dropped));
102212+ snprintf(buf, sizeof(buf), "%u\n", atomic_read_unchecked(&bt->dropped));
102213
102214 return simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf));
102215 }
102216@@ -386,7 +386,7 @@ static int blk_subbuf_start_callback(struct rchan_buf *buf, void *subbuf,
102217 return 1;
102218
102219 bt = buf->chan->private_data;
102220- atomic_inc(&bt->dropped);
102221+ atomic_inc_unchecked(&bt->dropped);
102222 return 0;
102223 }
102224
102225@@ -487,7 +487,7 @@ int do_blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
102226
102227 bt->dir = dir;
102228 bt->dev = dev;
102229- atomic_set(&bt->dropped, 0);
102230+ atomic_set_unchecked(&bt->dropped, 0);
102231 INIT_LIST_HEAD(&bt->running_list);
102232
102233 ret = -EIO;
102234diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
102235index 4f22802..bd268b1 100644
102236--- a/kernel/trace/ftrace.c
102237+++ b/kernel/trace/ftrace.c
102238@@ -2382,12 +2382,17 @@ ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec)
102239 if (unlikely(ftrace_disabled))
102240 return 0;
102241
102242+ ret = ftrace_arch_code_modify_prepare();
102243+ FTRACE_WARN_ON(ret);
102244+ if (ret)
102245+ return 0;
102246+
102247 ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
102248+ FTRACE_WARN_ON(ftrace_arch_code_modify_post_process());
102249 if (ret) {
102250 ftrace_bug(ret, rec);
102251- return 0;
102252 }
102253- return 1;
102254+ return ret ? 0 : 1;
102255 }
102256
102257 /*
102258@@ -4776,8 +4781,10 @@ static int ftrace_process_locs(struct module *mod,
102259 if (!count)
102260 return 0;
102261
102262+ pax_open_kernel();
102263 sort(start, count, sizeof(*start),
102264 ftrace_cmp_ips, ftrace_swap_ips);
102265+ pax_close_kernel();
102266
102267 start_pg = ftrace_allocate_pages(count);
102268 if (!start_pg)
102269@@ -5653,7 +5660,7 @@ static int alloc_retstack_tasklist(struct ftrace_ret_stack **ret_stack_list)
102270
102271 if (t->ret_stack == NULL) {
102272 atomic_set(&t->tracing_graph_pause, 0);
102273- atomic_set(&t->trace_overrun, 0);
102274+ atomic_set_unchecked(&t->trace_overrun, 0);
102275 t->curr_ret_stack = -1;
102276 /* Make sure the tasks see the -1 first: */
102277 smp_wmb();
102278@@ -5876,7 +5883,7 @@ static void
102279 graph_init_task(struct task_struct *t, struct ftrace_ret_stack *ret_stack)
102280 {
102281 atomic_set(&t->tracing_graph_pause, 0);
102282- atomic_set(&t->trace_overrun, 0);
102283+ atomic_set_unchecked(&t->trace_overrun, 0);
102284 t->ftrace_timestamp = 0;
102285 /* make curr_ret_stack visible before we add the ret_stack */
102286 smp_wmb();
102287diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
102288index 922048a..bb71a55 100644
102289--- a/kernel/trace/ring_buffer.c
102290+++ b/kernel/trace/ring_buffer.c
102291@@ -348,9 +348,9 @@ struct buffer_data_page {
102292 */
102293 struct buffer_page {
102294 struct list_head list; /* list of buffer pages */
102295- local_t write; /* index for next write */
102296+ local_unchecked_t write; /* index for next write */
102297 unsigned read; /* index for next read */
102298- local_t entries; /* entries on this page */
102299+ local_unchecked_t entries; /* entries on this page */
102300 unsigned long real_end; /* real end of data */
102301 struct buffer_data_page *page; /* Actual data page */
102302 };
102303@@ -471,11 +471,11 @@ struct ring_buffer_per_cpu {
102304 unsigned long last_overrun;
102305 local_t entries_bytes;
102306 local_t entries;
102307- local_t overrun;
102308- local_t commit_overrun;
102309- local_t dropped_events;
102310+ local_unchecked_t overrun;
102311+ local_unchecked_t commit_overrun;
102312+ local_unchecked_t dropped_events;
102313 local_t committing;
102314- local_t commits;
102315+ local_unchecked_t commits;
102316 unsigned long read;
102317 unsigned long read_bytes;
102318 u64 write_stamp;
102319@@ -1045,8 +1045,8 @@ static int rb_tail_page_update(struct ring_buffer_per_cpu *cpu_buffer,
102320 *
102321 * We add a counter to the write field to denote this.
102322 */
102323- old_write = local_add_return(RB_WRITE_INTCNT, &next_page->write);
102324- old_entries = local_add_return(RB_WRITE_INTCNT, &next_page->entries);
102325+ old_write = local_add_return_unchecked(RB_WRITE_INTCNT, &next_page->write);
102326+ old_entries = local_add_return_unchecked(RB_WRITE_INTCNT, &next_page->entries);
102327
102328 /*
102329 * Just make sure we have seen our old_write and synchronize
102330@@ -1074,8 +1074,8 @@ static int rb_tail_page_update(struct ring_buffer_per_cpu *cpu_buffer,
102331 * cmpxchg to only update if an interrupt did not already
102332 * do it for us. If the cmpxchg fails, we don't care.
102333 */
102334- (void)local_cmpxchg(&next_page->write, old_write, val);
102335- (void)local_cmpxchg(&next_page->entries, old_entries, eval);
102336+ (void)local_cmpxchg_unchecked(&next_page->write, old_write, val);
102337+ (void)local_cmpxchg_unchecked(&next_page->entries, old_entries, eval);
102338
102339 /*
102340 * No need to worry about races with clearing out the commit.
102341@@ -1443,12 +1443,12 @@ static void rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer);
102342
102343 static inline unsigned long rb_page_entries(struct buffer_page *bpage)
102344 {
102345- return local_read(&bpage->entries) & RB_WRITE_MASK;
102346+ return local_read_unchecked(&bpage->entries) & RB_WRITE_MASK;
102347 }
102348
102349 static inline unsigned long rb_page_write(struct buffer_page *bpage)
102350 {
102351- return local_read(&bpage->write) & RB_WRITE_MASK;
102352+ return local_read_unchecked(&bpage->write) & RB_WRITE_MASK;
102353 }
102354
102355 static int
102356@@ -1543,7 +1543,7 @@ rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned int nr_pages)
102357 * bytes consumed in ring buffer from here.
102358 * Increment overrun to account for the lost events.
102359 */
102360- local_add(page_entries, &cpu_buffer->overrun);
102361+ local_add_unchecked(page_entries, &cpu_buffer->overrun);
102362 local_sub(BUF_PAGE_SIZE, &cpu_buffer->entries_bytes);
102363 }
102364
102365@@ -2105,7 +2105,7 @@ rb_handle_head_page(struct ring_buffer_per_cpu *cpu_buffer,
102366 * it is our responsibility to update
102367 * the counters.
102368 */
102369- local_add(entries, &cpu_buffer->overrun);
102370+ local_add_unchecked(entries, &cpu_buffer->overrun);
102371 local_sub(BUF_PAGE_SIZE, &cpu_buffer->entries_bytes);
102372
102373 /*
102374@@ -2255,7 +2255,7 @@ rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
102375 if (tail == BUF_PAGE_SIZE)
102376 tail_page->real_end = 0;
102377
102378- local_sub(length, &tail_page->write);
102379+ local_sub_unchecked(length, &tail_page->write);
102380 return;
102381 }
102382
102383@@ -2290,7 +2290,7 @@ rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
102384 rb_event_set_padding(event);
102385
102386 /* Set the write back to the previous setting */
102387- local_sub(length, &tail_page->write);
102388+ local_sub_unchecked(length, &tail_page->write);
102389 return;
102390 }
102391
102392@@ -2302,7 +2302,7 @@ rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
102393
102394 /* Set write to end of buffer */
102395 length = (tail + length) - BUF_PAGE_SIZE;
102396- local_sub(length, &tail_page->write);
102397+ local_sub_unchecked(length, &tail_page->write);
102398 }
102399
102400 /*
102401@@ -2328,7 +2328,7 @@ rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer,
102402 * about it.
102403 */
102404 if (unlikely(next_page == commit_page)) {
102405- local_inc(&cpu_buffer->commit_overrun);
102406+ local_inc_unchecked(&cpu_buffer->commit_overrun);
102407 goto out_reset;
102408 }
102409
102410@@ -2358,7 +2358,7 @@ rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer,
102411 * this is easy, just stop here.
102412 */
102413 if (!(buffer->flags & RB_FL_OVERWRITE)) {
102414- local_inc(&cpu_buffer->dropped_events);
102415+ local_inc_unchecked(&cpu_buffer->dropped_events);
102416 goto out_reset;
102417 }
102418
102419@@ -2384,7 +2384,7 @@ rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer,
102420 cpu_buffer->tail_page) &&
102421 (cpu_buffer->commit_page ==
102422 cpu_buffer->reader_page))) {
102423- local_inc(&cpu_buffer->commit_overrun);
102424+ local_inc_unchecked(&cpu_buffer->commit_overrun);
102425 goto out_reset;
102426 }
102427 }
102428@@ -2432,7 +2432,7 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
102429 length += RB_LEN_TIME_EXTEND;
102430
102431 tail_page = cpu_buffer->tail_page;
102432- write = local_add_return(length, &tail_page->write);
102433+ write = local_add_return_unchecked(length, &tail_page->write);
102434
102435 /* set write to only the index of the write */
102436 write &= RB_WRITE_MASK;
102437@@ -2456,7 +2456,7 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
102438 kmemcheck_annotate_bitfield(event, bitfield);
102439 rb_update_event(cpu_buffer, event, length, add_timestamp, delta);
102440
102441- local_inc(&tail_page->entries);
102442+ local_inc_unchecked(&tail_page->entries);
102443
102444 /*
102445 * If this is the first commit on the page, then update
102446@@ -2489,7 +2489,7 @@ rb_try_to_discard(struct ring_buffer_per_cpu *cpu_buffer,
102447
102448 if (bpage->page == (void *)addr && rb_page_write(bpage) == old_index) {
102449 unsigned long write_mask =
102450- local_read(&bpage->write) & ~RB_WRITE_MASK;
102451+ local_read_unchecked(&bpage->write) & ~RB_WRITE_MASK;
102452 unsigned long event_length = rb_event_length(event);
102453 /*
102454 * This is on the tail page. It is possible that
102455@@ -2499,7 +2499,7 @@ rb_try_to_discard(struct ring_buffer_per_cpu *cpu_buffer,
102456 */
102457 old_index += write_mask;
102458 new_index += write_mask;
102459- index = local_cmpxchg(&bpage->write, old_index, new_index);
102460+ index = local_cmpxchg_unchecked(&bpage->write, old_index, new_index);
102461 if (index == old_index) {
102462 /* update counters */
102463 local_sub(event_length, &cpu_buffer->entries_bytes);
102464@@ -2514,7 +2514,7 @@ rb_try_to_discard(struct ring_buffer_per_cpu *cpu_buffer,
102465 static void rb_start_commit(struct ring_buffer_per_cpu *cpu_buffer)
102466 {
102467 local_inc(&cpu_buffer->committing);
102468- local_inc(&cpu_buffer->commits);
102469+ local_inc_unchecked(&cpu_buffer->commits);
102470 }
102471
102472 static inline void rb_end_commit(struct ring_buffer_per_cpu *cpu_buffer)
102473@@ -2526,7 +2526,7 @@ static inline void rb_end_commit(struct ring_buffer_per_cpu *cpu_buffer)
102474 return;
102475
102476 again:
102477- commits = local_read(&cpu_buffer->commits);
102478+ commits = local_read_unchecked(&cpu_buffer->commits);
102479 /* synchronize with interrupts */
102480 barrier();
102481 if (local_read(&cpu_buffer->committing) == 1)
102482@@ -2542,7 +2542,7 @@ static inline void rb_end_commit(struct ring_buffer_per_cpu *cpu_buffer)
102483 * updating of the commit page and the clearing of the
102484 * committing counter.
102485 */
102486- if (unlikely(local_read(&cpu_buffer->commits) != commits) &&
102487+ if (unlikely(local_read_unchecked(&cpu_buffer->commits) != commits) &&
102488 !local_read(&cpu_buffer->committing)) {
102489 local_inc(&cpu_buffer->committing);
102490 goto again;
102491@@ -2572,7 +2572,7 @@ rb_reserve_next_event(struct ring_buffer *buffer,
102492 barrier();
102493 if (unlikely(ACCESS_ONCE(cpu_buffer->buffer) != buffer)) {
102494 local_dec(&cpu_buffer->committing);
102495- local_dec(&cpu_buffer->commits);
102496+ local_dec_unchecked(&cpu_buffer->commits);
102497 return NULL;
102498 }
102499 #endif
102500@@ -2901,7 +2901,7 @@ rb_decrement_entry(struct ring_buffer_per_cpu *cpu_buffer,
102501
102502 /* Do the likely case first */
102503 if (likely(bpage->page == (void *)addr)) {
102504- local_dec(&bpage->entries);
102505+ local_dec_unchecked(&bpage->entries);
102506 return;
102507 }
102508
102509@@ -2913,7 +2913,7 @@ rb_decrement_entry(struct ring_buffer_per_cpu *cpu_buffer,
102510 start = bpage;
102511 do {
102512 if (bpage->page == (void *)addr) {
102513- local_dec(&bpage->entries);
102514+ local_dec_unchecked(&bpage->entries);
102515 return;
102516 }
102517 rb_inc_page(cpu_buffer, &bpage);
102518@@ -3197,7 +3197,7 @@ static inline unsigned long
102519 rb_num_of_entries(struct ring_buffer_per_cpu *cpu_buffer)
102520 {
102521 return local_read(&cpu_buffer->entries) -
102522- (local_read(&cpu_buffer->overrun) + cpu_buffer->read);
102523+ (local_read_unchecked(&cpu_buffer->overrun) + cpu_buffer->read);
102524 }
102525
102526 /**
102527@@ -3286,7 +3286,7 @@ unsigned long ring_buffer_overrun_cpu(struct ring_buffer *buffer, int cpu)
102528 return 0;
102529
102530 cpu_buffer = buffer->buffers[cpu];
102531- ret = local_read(&cpu_buffer->overrun);
102532+ ret = local_read_unchecked(&cpu_buffer->overrun);
102533
102534 return ret;
102535 }
102536@@ -3309,7 +3309,7 @@ ring_buffer_commit_overrun_cpu(struct ring_buffer *buffer, int cpu)
102537 return 0;
102538
102539 cpu_buffer = buffer->buffers[cpu];
102540- ret = local_read(&cpu_buffer->commit_overrun);
102541+ ret = local_read_unchecked(&cpu_buffer->commit_overrun);
102542
102543 return ret;
102544 }
102545@@ -3331,7 +3331,7 @@ ring_buffer_dropped_events_cpu(struct ring_buffer *buffer, int cpu)
102546 return 0;
102547
102548 cpu_buffer = buffer->buffers[cpu];
102549- ret = local_read(&cpu_buffer->dropped_events);
102550+ ret = local_read_unchecked(&cpu_buffer->dropped_events);
102551
102552 return ret;
102553 }
102554@@ -3394,7 +3394,7 @@ unsigned long ring_buffer_overruns(struct ring_buffer *buffer)
102555 /* if you care about this being correct, lock the buffer */
102556 for_each_buffer_cpu(buffer, cpu) {
102557 cpu_buffer = buffer->buffers[cpu];
102558- overruns += local_read(&cpu_buffer->overrun);
102559+ overruns += local_read_unchecked(&cpu_buffer->overrun);
102560 }
102561
102562 return overruns;
102563@@ -3565,8 +3565,8 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
102564 /*
102565 * Reset the reader page to size zero.
102566 */
102567- local_set(&cpu_buffer->reader_page->write, 0);
102568- local_set(&cpu_buffer->reader_page->entries, 0);
102569+ local_set_unchecked(&cpu_buffer->reader_page->write, 0);
102570+ local_set_unchecked(&cpu_buffer->reader_page->entries, 0);
102571 local_set(&cpu_buffer->reader_page->page->commit, 0);
102572 cpu_buffer->reader_page->real_end = 0;
102573
102574@@ -3600,7 +3600,7 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
102575 * want to compare with the last_overrun.
102576 */
102577 smp_mb();
102578- overwrite = local_read(&(cpu_buffer->overrun));
102579+ overwrite = local_read_unchecked(&(cpu_buffer->overrun));
102580
102581 /*
102582 * Here's the tricky part.
102583@@ -4172,8 +4172,8 @@ rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
102584
102585 cpu_buffer->head_page
102586 = list_entry(cpu_buffer->pages, struct buffer_page, list);
102587- local_set(&cpu_buffer->head_page->write, 0);
102588- local_set(&cpu_buffer->head_page->entries, 0);
102589+ local_set_unchecked(&cpu_buffer->head_page->write, 0);
102590+ local_set_unchecked(&cpu_buffer->head_page->entries, 0);
102591 local_set(&cpu_buffer->head_page->page->commit, 0);
102592
102593 cpu_buffer->head_page->read = 0;
102594@@ -4183,18 +4183,18 @@ rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
102595
102596 INIT_LIST_HEAD(&cpu_buffer->reader_page->list);
102597 INIT_LIST_HEAD(&cpu_buffer->new_pages);
102598- local_set(&cpu_buffer->reader_page->write, 0);
102599- local_set(&cpu_buffer->reader_page->entries, 0);
102600+ local_set_unchecked(&cpu_buffer->reader_page->write, 0);
102601+ local_set_unchecked(&cpu_buffer->reader_page->entries, 0);
102602 local_set(&cpu_buffer->reader_page->page->commit, 0);
102603 cpu_buffer->reader_page->read = 0;
102604
102605 local_set(&cpu_buffer->entries_bytes, 0);
102606- local_set(&cpu_buffer->overrun, 0);
102607- local_set(&cpu_buffer->commit_overrun, 0);
102608- local_set(&cpu_buffer->dropped_events, 0);
102609+ local_set_unchecked(&cpu_buffer->overrun, 0);
102610+ local_set_unchecked(&cpu_buffer->commit_overrun, 0);
102611+ local_set_unchecked(&cpu_buffer->dropped_events, 0);
102612 local_set(&cpu_buffer->entries, 0);
102613 local_set(&cpu_buffer->committing, 0);
102614- local_set(&cpu_buffer->commits, 0);
102615+ local_set_unchecked(&cpu_buffer->commits, 0);
102616 cpu_buffer->read = 0;
102617 cpu_buffer->read_bytes = 0;
102618
102619@@ -4595,8 +4595,8 @@ int ring_buffer_read_page(struct ring_buffer *buffer,
102620 rb_init_page(bpage);
102621 bpage = reader->page;
102622 reader->page = *data_page;
102623- local_set(&reader->write, 0);
102624- local_set(&reader->entries, 0);
102625+ local_set_unchecked(&reader->write, 0);
102626+ local_set_unchecked(&reader->entries, 0);
102627 reader->read = 0;
102628 *data_page = bpage;
102629
102630diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
102631index 62c6506..5c25989 100644
102632--- a/kernel/trace/trace.c
102633+++ b/kernel/trace/trace.c
102634@@ -3500,7 +3500,7 @@ int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set)
102635 return 0;
102636 }
102637
102638-int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled)
102639+int set_tracer_flag(struct trace_array *tr, unsigned long mask, int enabled)
102640 {
102641 /* do nothing if flag is already set */
102642 if (!!(trace_flags & mask) == !!enabled)
102643diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
102644index dd8205a..1aae87a 100644
102645--- a/kernel/trace/trace.h
102646+++ b/kernel/trace/trace.h
102647@@ -1271,7 +1271,7 @@ extern const char *__stop___tracepoint_str[];
102648 void trace_printk_init_buffers(void);
102649 void trace_printk_start_comm(void);
102650 int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set);
102651-int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled);
102652+int set_tracer_flag(struct trace_array *tr, unsigned long mask, int enabled);
102653
102654 /*
102655 * Normal trace_printk() and friends allocates special buffers
102656diff --git a/kernel/trace/trace_clock.c b/kernel/trace/trace_clock.c
102657index 57b67b1..66082a9 100644
102658--- a/kernel/trace/trace_clock.c
102659+++ b/kernel/trace/trace_clock.c
102660@@ -124,7 +124,7 @@ u64 notrace trace_clock_global(void)
102661 return now;
102662 }
102663
102664-static atomic64_t trace_counter;
102665+static atomic64_unchecked_t trace_counter;
102666
102667 /*
102668 * trace_clock_counter(): simply an atomic counter.
102669@@ -133,5 +133,5 @@ static atomic64_t trace_counter;
102670 */
102671 u64 notrace trace_clock_counter(void)
102672 {
102673- return atomic64_add_return(1, &trace_counter);
102674+ return atomic64_inc_return_unchecked(&trace_counter);
102675 }
102676diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
102677index a9c10a3..1864f6b 100644
102678--- a/kernel/trace/trace_events.c
102679+++ b/kernel/trace/trace_events.c
102680@@ -1762,7 +1762,6 @@ __trace_early_add_new_event(struct ftrace_event_call *call,
102681 return 0;
102682 }
102683
102684-struct ftrace_module_file_ops;
102685 static void __add_event_to_tracers(struct ftrace_event_call *call);
102686
102687 /* Add an additional event_call dynamically */
102688diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c
102689index b6fce36..d9f11a3 100644
102690--- a/kernel/trace/trace_functions_graph.c
102691+++ b/kernel/trace/trace_functions_graph.c
102692@@ -133,7 +133,7 @@ ftrace_push_return_trace(unsigned long ret, unsigned long func, int *depth,
102693
102694 /* The return trace stack is full */
102695 if (current->curr_ret_stack == FTRACE_RETFUNC_DEPTH - 1) {
102696- atomic_inc(&current->trace_overrun);
102697+ atomic_inc_unchecked(&current->trace_overrun);
102698 return -EBUSY;
102699 }
102700
102701@@ -230,7 +230,7 @@ ftrace_pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret,
102702 *ret = current->ret_stack[index].ret;
102703 trace->func = current->ret_stack[index].func;
102704 trace->calltime = current->ret_stack[index].calltime;
102705- trace->overrun = atomic_read(&current->trace_overrun);
102706+ trace->overrun = atomic_read_unchecked(&current->trace_overrun);
102707 trace->depth = index;
102708 }
102709
102710diff --git a/kernel/trace/trace_mmiotrace.c b/kernel/trace/trace_mmiotrace.c
102711index 7a9ba62..2e0e4a1 100644
102712--- a/kernel/trace/trace_mmiotrace.c
102713+++ b/kernel/trace/trace_mmiotrace.c
102714@@ -24,7 +24,7 @@ struct header_iter {
102715 static struct trace_array *mmio_trace_array;
102716 static bool overrun_detected;
102717 static unsigned long prev_overruns;
102718-static atomic_t dropped_count;
102719+static atomic_unchecked_t dropped_count;
102720
102721 static void mmio_reset_data(struct trace_array *tr)
102722 {
102723@@ -124,7 +124,7 @@ static void mmio_close(struct trace_iterator *iter)
102724
102725 static unsigned long count_overruns(struct trace_iterator *iter)
102726 {
102727- unsigned long cnt = atomic_xchg(&dropped_count, 0);
102728+ unsigned long cnt = atomic_xchg_unchecked(&dropped_count, 0);
102729 unsigned long over = ring_buffer_overruns(iter->trace_buffer->buffer);
102730
102731 if (over > prev_overruns)
102732@@ -307,7 +307,7 @@ static void __trace_mmiotrace_rw(struct trace_array *tr,
102733 event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_RW,
102734 sizeof(*entry), 0, pc);
102735 if (!event) {
102736- atomic_inc(&dropped_count);
102737+ atomic_inc_unchecked(&dropped_count);
102738 return;
102739 }
102740 entry = ring_buffer_event_data(event);
102741@@ -337,7 +337,7 @@ static void __trace_mmiotrace_map(struct trace_array *tr,
102742 event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_MAP,
102743 sizeof(*entry), 0, pc);
102744 if (!event) {
102745- atomic_inc(&dropped_count);
102746+ atomic_inc_unchecked(&dropped_count);
102747 return;
102748 }
102749 entry = ring_buffer_event_data(event);
102750diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c
102751index 692bf71..6d9a9cd 100644
102752--- a/kernel/trace/trace_output.c
102753+++ b/kernel/trace/trace_output.c
102754@@ -751,14 +751,16 @@ int register_ftrace_event(struct trace_event *event)
102755 goto out;
102756 }
102757
102758+ pax_open_kernel();
102759 if (event->funcs->trace == NULL)
102760- event->funcs->trace = trace_nop_print;
102761+ *(void **)&event->funcs->trace = trace_nop_print;
102762 if (event->funcs->raw == NULL)
102763- event->funcs->raw = trace_nop_print;
102764+ *(void **)&event->funcs->raw = trace_nop_print;
102765 if (event->funcs->hex == NULL)
102766- event->funcs->hex = trace_nop_print;
102767+ *(void **)&event->funcs->hex = trace_nop_print;
102768 if (event->funcs->binary == NULL)
102769- event->funcs->binary = trace_nop_print;
102770+ *(void **)&event->funcs->binary = trace_nop_print;
102771+ pax_close_kernel();
102772
102773 key = event->type & (EVENT_HASHSIZE - 1);
102774
102775diff --git a/kernel/trace/trace_seq.c b/kernel/trace/trace_seq.c
102776index e694c9f..6775a38 100644
102777--- a/kernel/trace/trace_seq.c
102778+++ b/kernel/trace/trace_seq.c
102779@@ -337,7 +337,7 @@ int trace_seq_path(struct trace_seq *s, const struct path *path)
102780 return 0;
102781 }
102782
102783- seq_buf_path(&s->seq, path, "\n");
102784+ seq_buf_path(&s->seq, path, "\n\\");
102785
102786 if (unlikely(seq_buf_has_overflowed(&s->seq))) {
102787 s->seq.len = save_len;
102788diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c
102789index c3e4fcf..ef6cc43 100644
102790--- a/kernel/trace/trace_stack.c
102791+++ b/kernel/trace/trace_stack.c
102792@@ -88,7 +88,7 @@ check_stack(unsigned long ip, unsigned long *stack)
102793 return;
102794
102795 /* we do not handle interrupt stacks yet */
102796- if (!object_is_on_stack(stack))
102797+ if (!object_starts_on_stack(stack))
102798 return;
102799
102800 local_irq_save(flags);
102801diff --git a/kernel/trace/trace_syscalls.c b/kernel/trace/trace_syscalls.c
102802index f97f6e3..d367b48 100644
102803--- a/kernel/trace/trace_syscalls.c
102804+++ b/kernel/trace/trace_syscalls.c
102805@@ -590,6 +590,8 @@ static int perf_sysenter_enable(struct ftrace_event_call *call)
102806 int num;
102807
102808 num = ((struct syscall_metadata *)call->data)->syscall_nr;
102809+ if (WARN_ON_ONCE(num < 0 || num >= NR_syscalls))
102810+ return -EINVAL;
102811
102812 mutex_lock(&syscall_trace_lock);
102813 if (!sys_perf_refcount_enter)
102814@@ -610,6 +612,8 @@ static void perf_sysenter_disable(struct ftrace_event_call *call)
102815 int num;
102816
102817 num = ((struct syscall_metadata *)call->data)->syscall_nr;
102818+ if (WARN_ON_ONCE(num < 0 || num >= NR_syscalls))
102819+ return;
102820
102821 mutex_lock(&syscall_trace_lock);
102822 sys_perf_refcount_enter--;
102823@@ -662,6 +666,8 @@ static int perf_sysexit_enable(struct ftrace_event_call *call)
102824 int num;
102825
102826 num = ((struct syscall_metadata *)call->data)->syscall_nr;
102827+ if (WARN_ON_ONCE(num < 0 || num >= NR_syscalls))
102828+ return -EINVAL;
102829
102830 mutex_lock(&syscall_trace_lock);
102831 if (!sys_perf_refcount_exit)
102832@@ -682,6 +688,8 @@ static void perf_sysexit_disable(struct ftrace_event_call *call)
102833 int num;
102834
102835 num = ((struct syscall_metadata *)call->data)->syscall_nr;
102836+ if (WARN_ON_ONCE(num < 0 || num >= NR_syscalls))
102837+ return;
102838
102839 mutex_lock(&syscall_trace_lock);
102840 sys_perf_refcount_exit--;
102841diff --git a/kernel/user_namespace.c b/kernel/user_namespace.c
102842index 4109f83..fe1f830 100644
102843--- a/kernel/user_namespace.c
102844+++ b/kernel/user_namespace.c
102845@@ -83,6 +83,21 @@ int create_user_ns(struct cred *new)
102846 !kgid_has_mapping(parent_ns, group))
102847 return -EPERM;
102848
102849+#ifdef CONFIG_GRKERNSEC
102850+ /*
102851+ * This doesn't really inspire confidence:
102852+ * http://marc.info/?l=linux-kernel&m=135543612731939&w=2
102853+ * http://marc.info/?l=linux-kernel&m=135545831607095&w=2
102854+ * Increases kernel attack surface in areas developers
102855+ * previously cared little about ("low importance due
102856+ * to requiring "root" capability")
102857+ * To be removed when this code receives *proper* review
102858+ */
102859+ if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SETUID) ||
102860+ !capable(CAP_SETGID))
102861+ return -EPERM;
102862+#endif
102863+
102864 ns = kmem_cache_zalloc(user_ns_cachep, GFP_KERNEL);
102865 if (!ns)
102866 return -ENOMEM;
102867@@ -980,7 +995,7 @@ static int userns_install(struct nsproxy *nsproxy, struct ns_common *ns)
102868 if (atomic_read(&current->mm->mm_users) > 1)
102869 return -EINVAL;
102870
102871- if (current->fs->users != 1)
102872+ if (atomic_read(&current->fs->users) != 1)
102873 return -EINVAL;
102874
102875 if (!ns_capable(user_ns, CAP_SYS_ADMIN))
102876diff --git a/kernel/utsname_sysctl.c b/kernel/utsname_sysctl.c
102877index c8eac43..4b5f08f 100644
102878--- a/kernel/utsname_sysctl.c
102879+++ b/kernel/utsname_sysctl.c
102880@@ -47,7 +47,7 @@ static void put_uts(struct ctl_table *table, int write, void *which)
102881 static int proc_do_uts_string(struct ctl_table *table, int write,
102882 void __user *buffer, size_t *lenp, loff_t *ppos)
102883 {
102884- struct ctl_table uts_table;
102885+ ctl_table_no_const uts_table;
102886 int r;
102887 memcpy(&uts_table, table, sizeof(uts_table));
102888 uts_table.data = get_uts(table, write);
102889diff --git a/kernel/watchdog.c b/kernel/watchdog.c
102890index 3174bf8..3553520 100644
102891--- a/kernel/watchdog.c
102892+++ b/kernel/watchdog.c
102893@@ -572,7 +572,7 @@ static int watchdog_nmi_enable(unsigned int cpu) { return 0; }
102894 static void watchdog_nmi_disable(unsigned int cpu) { return; }
102895 #endif /* CONFIG_HARDLOCKUP_DETECTOR */
102896
102897-static struct smp_hotplug_thread watchdog_threads = {
102898+static struct smp_hotplug_thread watchdog_threads __read_only = {
102899 .store = &softlockup_watchdog,
102900 .thread_should_run = watchdog_should_run,
102901 .thread_fn = watchdog,
102902diff --git a/kernel/workqueue.c b/kernel/workqueue.c
102903index 41ff75b..5ad683a 100644
102904--- a/kernel/workqueue.c
102905+++ b/kernel/workqueue.c
102906@@ -4564,7 +4564,7 @@ static void rebind_workers(struct worker_pool *pool)
102907 WARN_ON_ONCE(!(worker_flags & WORKER_UNBOUND));
102908 worker_flags |= WORKER_REBOUND;
102909 worker_flags &= ~WORKER_UNBOUND;
102910- ACCESS_ONCE(worker->flags) = worker_flags;
102911+ ACCESS_ONCE_RW(worker->flags) = worker_flags;
102912 }
102913
102914 spin_unlock_irq(&pool->lock);
102915diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
102916index c5cefb3..a4241e3 100644
102917--- a/lib/Kconfig.debug
102918+++ b/lib/Kconfig.debug
102919@@ -923,7 +923,7 @@ config DEBUG_MUTEXES
102920
102921 config DEBUG_WW_MUTEX_SLOWPATH
102922 bool "Wait/wound mutex debugging: Slowpath testing"
102923- depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
102924+ depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT && !PAX_CONSTIFY_PLUGIN
102925 select DEBUG_LOCK_ALLOC
102926 select DEBUG_SPINLOCK
102927 select DEBUG_MUTEXES
102928@@ -940,7 +940,7 @@ config DEBUG_WW_MUTEX_SLOWPATH
102929
102930 config DEBUG_LOCK_ALLOC
102931 bool "Lock debugging: detect incorrect freeing of live locks"
102932- depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
102933+ depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT && !PAX_CONSTIFY_PLUGIN
102934 select DEBUG_SPINLOCK
102935 select DEBUG_MUTEXES
102936 select LOCKDEP
102937@@ -954,7 +954,7 @@ config DEBUG_LOCK_ALLOC
102938
102939 config PROVE_LOCKING
102940 bool "Lock debugging: prove locking correctness"
102941- depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
102942+ depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT && !PAX_CONSTIFY_PLUGIN
102943 select LOCKDEP
102944 select DEBUG_SPINLOCK
102945 select DEBUG_MUTEXES
102946@@ -1005,7 +1005,7 @@ config LOCKDEP
102947
102948 config LOCK_STAT
102949 bool "Lock usage statistics"
102950- depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
102951+ depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT && !PAX_CONSTIFY_PLUGIN
102952 select LOCKDEP
102953 select DEBUG_SPINLOCK
102954 select DEBUG_MUTEXES
102955@@ -1467,6 +1467,7 @@ config LATENCYTOP
102956 depends on DEBUG_KERNEL
102957 depends on STACKTRACE_SUPPORT
102958 depends on PROC_FS
102959+ depends on !GRKERNSEC_HIDESYM
102960 select FRAME_POINTER if !MIPS && !PPC && !S390 && !MICROBLAZE && !ARM_UNWIND && !ARC
102961 select KALLSYMS
102962 select KALLSYMS_ALL
102963@@ -1483,7 +1484,7 @@ config ARCH_HAS_DEBUG_STRICT_USER_COPY_CHECKS
102964 config DEBUG_STRICT_USER_COPY_CHECKS
102965 bool "Strict user copy size checks"
102966 depends on ARCH_HAS_DEBUG_STRICT_USER_COPY_CHECKS
102967- depends on DEBUG_KERNEL && !TRACE_BRANCH_PROFILING
102968+ depends on DEBUG_KERNEL && !TRACE_BRANCH_PROFILING && !PAX_SIZE_OVERFLOW
102969 help
102970 Enabling this option turns a certain set of sanity checks for user
102971 copy operations into compile time failures.
102972@@ -1614,7 +1615,7 @@ endmenu # runtime tests
102973
102974 config PROVIDE_OHCI1394_DMA_INIT
102975 bool "Remote debugging over FireWire early on boot"
102976- depends on PCI && X86
102977+ depends on PCI && X86 && !GRKERNSEC
102978 help
102979 If you want to debug problems which hang or crash the kernel early
102980 on boot and the crashing machine has a FireWire port, you can use
102981diff --git a/lib/Makefile b/lib/Makefile
102982index 58f74d2..08e011f 100644
102983--- a/lib/Makefile
102984+++ b/lib/Makefile
102985@@ -59,7 +59,7 @@ obj-$(CONFIG_BTREE) += btree.o
102986 obj-$(CONFIG_INTERVAL_TREE) += interval_tree.o
102987 obj-$(CONFIG_ASSOCIATIVE_ARRAY) += assoc_array.o
102988 obj-$(CONFIG_DEBUG_PREEMPT) += smp_processor_id.o
102989-obj-$(CONFIG_DEBUG_LIST) += list_debug.o
102990+obj-y += list_debug.o
102991 obj-$(CONFIG_DEBUG_OBJECTS) += debugobjects.o
102992
102993 ifneq ($(CONFIG_HAVE_DEC_LOCK),y)
102994diff --git a/lib/average.c b/lib/average.c
102995index 114d1be..ab0350c 100644
102996--- a/lib/average.c
102997+++ b/lib/average.c
102998@@ -55,7 +55,7 @@ struct ewma *ewma_add(struct ewma *avg, unsigned long val)
102999 {
103000 unsigned long internal = ACCESS_ONCE(avg->internal);
103001
103002- ACCESS_ONCE(avg->internal) = internal ?
103003+ ACCESS_ONCE_RW(avg->internal) = internal ?
103004 (((internal << avg->weight) - internal) +
103005 (val << avg->factor)) >> avg->weight :
103006 (val << avg->factor);
103007diff --git a/lib/bitmap.c b/lib/bitmap.c
103008index d456f4c1..29a0308 100644
103009--- a/lib/bitmap.c
103010+++ b/lib/bitmap.c
103011@@ -264,7 +264,7 @@ int __bitmap_subset(const unsigned long *bitmap1,
103012 }
103013 EXPORT_SYMBOL(__bitmap_subset);
103014
103015-int __bitmap_weight(const unsigned long *bitmap, unsigned int bits)
103016+int __intentional_overflow(-1) __bitmap_weight(const unsigned long *bitmap, unsigned int bits)
103017 {
103018 unsigned int k, lim = bits/BITS_PER_LONG;
103019 int w = 0;
103020@@ -391,7 +391,7 @@ int __bitmap_parse(const char *buf, unsigned int buflen,
103021 {
103022 int c, old_c, totaldigits, ndigits, nchunks, nbits;
103023 u32 chunk;
103024- const char __user __force *ubuf = (const char __user __force *)buf;
103025+ const char __user *ubuf = (const char __force_user *)buf;
103026
103027 bitmap_zero(maskp, nmaskbits);
103028
103029@@ -476,7 +476,7 @@ int bitmap_parse_user(const char __user *ubuf,
103030 {
103031 if (!access_ok(VERIFY_READ, ubuf, ulen))
103032 return -EFAULT;
103033- return __bitmap_parse((const char __force *)ubuf,
103034+ return __bitmap_parse((const char __force_kernel *)ubuf,
103035 ulen, 1, maskp, nmaskbits);
103036
103037 }
103038@@ -535,7 +535,7 @@ static int __bitmap_parselist(const char *buf, unsigned int buflen,
103039 {
103040 unsigned a, b;
103041 int c, old_c, totaldigits;
103042- const char __user __force *ubuf = (const char __user __force *)buf;
103043+ const char __user *ubuf = (const char __force_user *)buf;
103044 int exp_digit, in_range;
103045
103046 totaldigits = c = 0;
103047@@ -630,7 +630,7 @@ int bitmap_parselist_user(const char __user *ubuf,
103048 {
103049 if (!access_ok(VERIFY_READ, ubuf, ulen))
103050 return -EFAULT;
103051- return __bitmap_parselist((const char __force *)ubuf,
103052+ return __bitmap_parselist((const char __force_kernel *)ubuf,
103053 ulen, 1, maskp, nmaskbits);
103054 }
103055 EXPORT_SYMBOL(bitmap_parselist_user);
103056diff --git a/lib/bug.c b/lib/bug.c
103057index 0c3bd95..5a615a1 100644
103058--- a/lib/bug.c
103059+++ b/lib/bug.c
103060@@ -145,6 +145,8 @@ enum bug_trap_type report_bug(unsigned long bugaddr, struct pt_regs *regs)
103061 return BUG_TRAP_TYPE_NONE;
103062
103063 bug = find_bug(bugaddr);
103064+ if (!bug)
103065+ return BUG_TRAP_TYPE_NONE;
103066
103067 file = NULL;
103068 line = 0;
103069diff --git a/lib/debugobjects.c b/lib/debugobjects.c
103070index 547f7f9..a6d4ba0 100644
103071--- a/lib/debugobjects.c
103072+++ b/lib/debugobjects.c
103073@@ -289,7 +289,7 @@ static void debug_object_is_on_stack(void *addr, int onstack)
103074 if (limit > 4)
103075 return;
103076
103077- is_on_stack = object_is_on_stack(addr);
103078+ is_on_stack = object_starts_on_stack(addr);
103079 if (is_on_stack == onstack)
103080 return;
103081
103082diff --git a/lib/decompress_bunzip2.c b/lib/decompress_bunzip2.c
103083index 6dd0335..1e9c239 100644
103084--- a/lib/decompress_bunzip2.c
103085+++ b/lib/decompress_bunzip2.c
103086@@ -665,7 +665,8 @@ static int INIT start_bunzip(struct bunzip_data **bdp, void *inbuf, long len,
103087
103088 /* Fourth byte (ascii '1'-'9'), indicates block size in units of 100k of
103089 uncompressed data. Allocate intermediate buffer for block. */
103090- bd->dbufSize = 100000*(i-BZh0);
103091+ i -= BZh0;
103092+ bd->dbufSize = 100000 * i;
103093
103094 bd->dbuf = large_malloc(bd->dbufSize * sizeof(int));
103095 if (!bd->dbuf)
103096diff --git a/lib/div64.c b/lib/div64.c
103097index 4382ad7..08aa558 100644
103098--- a/lib/div64.c
103099+++ b/lib/div64.c
103100@@ -59,7 +59,7 @@ uint32_t __attribute__((weak)) __div64_32(uint64_t *n, uint32_t base)
103101 EXPORT_SYMBOL(__div64_32);
103102
103103 #ifndef div_s64_rem
103104-s64 div_s64_rem(s64 dividend, s32 divisor, s32 *remainder)
103105+s64 __intentional_overflow(-1) div_s64_rem(s64 dividend, s32 divisor, s32 *remainder)
103106 {
103107 u64 quotient;
103108
103109@@ -130,7 +130,7 @@ EXPORT_SYMBOL(div64_u64_rem);
103110 * 'http://www.hackersdelight.org/HDcode/newCode/divDouble.c.txt'
103111 */
103112 #ifndef div64_u64
103113-u64 div64_u64(u64 dividend, u64 divisor)
103114+u64 __intentional_overflow(-1) div64_u64(u64 dividend, u64 divisor)
103115 {
103116 u32 high = divisor >> 32;
103117 u64 quot;
103118diff --git a/lib/dma-debug.c b/lib/dma-debug.c
103119index 9722bd2..0d826f4 100644
103120--- a/lib/dma-debug.c
103121+++ b/lib/dma-debug.c
103122@@ -979,7 +979,7 @@ static int dma_debug_device_change(struct notifier_block *nb, unsigned long acti
103123
103124 void dma_debug_add_bus(struct bus_type *bus)
103125 {
103126- struct notifier_block *nb;
103127+ notifier_block_no_const *nb;
103128
103129 if (dma_debug_disabled())
103130 return;
103131@@ -1161,7 +1161,7 @@ static void check_unmap(struct dma_debug_entry *ref)
103132
103133 static void check_for_stack(struct device *dev, void *addr)
103134 {
103135- if (object_is_on_stack(addr))
103136+ if (object_starts_on_stack(addr))
103137 err_printk(dev, NULL, "DMA-API: device driver maps memory from "
103138 "stack [addr=%p]\n", addr);
103139 }
103140diff --git a/lib/inflate.c b/lib/inflate.c
103141index 013a761..c28f3fc 100644
103142--- a/lib/inflate.c
103143+++ b/lib/inflate.c
103144@@ -269,7 +269,7 @@ static void free(void *where)
103145 malloc_ptr = free_mem_ptr;
103146 }
103147 #else
103148-#define malloc(a) kmalloc(a, GFP_KERNEL)
103149+#define malloc(a) kmalloc((a), GFP_KERNEL)
103150 #define free(a) kfree(a)
103151 #endif
103152
103153diff --git a/lib/ioremap.c b/lib/ioremap.c
103154index 0c9216c..863bd89 100644
103155--- a/lib/ioremap.c
103156+++ b/lib/ioremap.c
103157@@ -38,7 +38,7 @@ static inline int ioremap_pmd_range(pud_t *pud, unsigned long addr,
103158 unsigned long next;
103159
103160 phys_addr -= addr;
103161- pmd = pmd_alloc(&init_mm, pud, addr);
103162+ pmd = pmd_alloc_kernel(&init_mm, pud, addr);
103163 if (!pmd)
103164 return -ENOMEM;
103165 do {
103166@@ -56,7 +56,7 @@ static inline int ioremap_pud_range(pgd_t *pgd, unsigned long addr,
103167 unsigned long next;
103168
103169 phys_addr -= addr;
103170- pud = pud_alloc(&init_mm, pgd, addr);
103171+ pud = pud_alloc_kernel(&init_mm, pgd, addr);
103172 if (!pud)
103173 return -ENOMEM;
103174 do {
103175diff --git a/lib/is_single_threaded.c b/lib/is_single_threaded.c
103176index bd2bea9..6b3c95e 100644
103177--- a/lib/is_single_threaded.c
103178+++ b/lib/is_single_threaded.c
103179@@ -22,6 +22,9 @@ bool current_is_single_threaded(void)
103180 struct task_struct *p, *t;
103181 bool ret;
103182
103183+ if (!mm)
103184+ return true;
103185+
103186 if (atomic_read(&task->signal->live) != 1)
103187 return false;
103188
103189diff --git a/lib/kobject.c b/lib/kobject.c
103190index 03d4ab3..46f6374 100644
103191--- a/lib/kobject.c
103192+++ b/lib/kobject.c
103193@@ -931,9 +931,9 @@ EXPORT_SYMBOL_GPL(kset_create_and_add);
103194
103195
103196 static DEFINE_SPINLOCK(kobj_ns_type_lock);
103197-static const struct kobj_ns_type_operations *kobj_ns_ops_tbl[KOBJ_NS_TYPES];
103198+static const struct kobj_ns_type_operations *kobj_ns_ops_tbl[KOBJ_NS_TYPES] __read_only;
103199
103200-int kobj_ns_type_register(const struct kobj_ns_type_operations *ops)
103201+int __init kobj_ns_type_register(const struct kobj_ns_type_operations *ops)
103202 {
103203 enum kobj_ns_type type = ops->type;
103204 int error;
103205diff --git a/lib/list_debug.c b/lib/list_debug.c
103206index c24c2f7..f0296f4 100644
103207--- a/lib/list_debug.c
103208+++ b/lib/list_debug.c
103209@@ -11,7 +11,9 @@
103210 #include <linux/bug.h>
103211 #include <linux/kernel.h>
103212 #include <linux/rculist.h>
103213+#include <linux/mm.h>
103214
103215+#ifdef CONFIG_DEBUG_LIST
103216 /*
103217 * Insert a new entry between two known consecutive entries.
103218 *
103219@@ -19,21 +21,40 @@
103220 * the prev/next entries already!
103221 */
103222
103223+static bool __list_add_debug(struct list_head *new,
103224+ struct list_head *prev,
103225+ struct list_head *next)
103226+{
103227+ if (unlikely(next->prev != prev)) {
103228+ printk(KERN_ERR "list_add corruption. next->prev should be "
103229+ "prev (%p), but was %p. (next=%p).\n",
103230+ prev, next->prev, next);
103231+ BUG();
103232+ return false;
103233+ }
103234+ if (unlikely(prev->next != next)) {
103235+ printk(KERN_ERR "list_add corruption. prev->next should be "
103236+ "next (%p), but was %p. (prev=%p).\n",
103237+ next, prev->next, prev);
103238+ BUG();
103239+ return false;
103240+ }
103241+ if (unlikely(new == prev || new == next)) {
103242+ printk(KERN_ERR "list_add double add: new=%p, prev=%p, next=%p.\n",
103243+ new, prev, next);
103244+ BUG();
103245+ return false;
103246+ }
103247+ return true;
103248+}
103249+
103250 void __list_add(struct list_head *new,
103251- struct list_head *prev,
103252- struct list_head *next)
103253+ struct list_head *prev,
103254+ struct list_head *next)
103255 {
103256- WARN(next->prev != prev,
103257- "list_add corruption. next->prev should be "
103258- "prev (%p), but was %p. (next=%p).\n",
103259- prev, next->prev, next);
103260- WARN(prev->next != next,
103261- "list_add corruption. prev->next should be "
103262- "next (%p), but was %p. (prev=%p).\n",
103263- next, prev->next, prev);
103264- WARN(new == prev || new == next,
103265- "list_add double add: new=%p, prev=%p, next=%p.\n",
103266- new, prev, next);
103267+ if (!__list_add_debug(new, prev, next))
103268+ return;
103269+
103270 next->prev = new;
103271 new->next = next;
103272 new->prev = prev;
103273@@ -41,28 +62,46 @@ void __list_add(struct list_head *new,
103274 }
103275 EXPORT_SYMBOL(__list_add);
103276
103277-void __list_del_entry(struct list_head *entry)
103278+static bool __list_del_entry_debug(struct list_head *entry)
103279 {
103280 struct list_head *prev, *next;
103281
103282 prev = entry->prev;
103283 next = entry->next;
103284
103285- if (WARN(next == LIST_POISON1,
103286- "list_del corruption, %p->next is LIST_POISON1 (%p)\n",
103287- entry, LIST_POISON1) ||
103288- WARN(prev == LIST_POISON2,
103289- "list_del corruption, %p->prev is LIST_POISON2 (%p)\n",
103290- entry, LIST_POISON2) ||
103291- WARN(prev->next != entry,
103292- "list_del corruption. prev->next should be %p, "
103293- "but was %p\n", entry, prev->next) ||
103294- WARN(next->prev != entry,
103295- "list_del corruption. next->prev should be %p, "
103296- "but was %p\n", entry, next->prev))
103297+ if (unlikely(next == LIST_POISON1)) {
103298+ printk(KERN_ERR "list_del corruption, %p->next is LIST_POISON1 (%p)\n",
103299+ entry, LIST_POISON1);
103300+ BUG();
103301+ return false;
103302+ }
103303+ if (unlikely(prev == LIST_POISON2)) {
103304+ printk(KERN_ERR "list_del corruption, %p->prev is LIST_POISON2 (%p)\n",
103305+ entry, LIST_POISON2);
103306+ BUG();
103307+ return false;
103308+ }
103309+ if (unlikely(entry->prev->next != entry)) {
103310+ printk(KERN_ERR "list_del corruption. prev->next should be %p, "
103311+ "but was %p\n", entry, prev->next);
103312+ BUG();
103313+ return false;
103314+ }
103315+ if (unlikely(entry->next->prev != entry)) {
103316+ printk(KERN_ERR "list_del corruption. next->prev should be %p, "
103317+ "but was %p\n", entry, next->prev);
103318+ BUG();
103319+ return false;
103320+ }
103321+ return true;
103322+}
103323+
103324+void __list_del_entry(struct list_head *entry)
103325+{
103326+ if (!__list_del_entry_debug(entry))
103327 return;
103328
103329- __list_del(prev, next);
103330+ __list_del(entry->prev, entry->next);
103331 }
103332 EXPORT_SYMBOL(__list_del_entry);
103333
103334@@ -86,15 +125,85 @@ EXPORT_SYMBOL(list_del);
103335 void __list_add_rcu(struct list_head *new,
103336 struct list_head *prev, struct list_head *next)
103337 {
103338- WARN(next->prev != prev,
103339- "list_add_rcu corruption. next->prev should be prev (%p), but was %p. (next=%p).\n",
103340- prev, next->prev, next);
103341- WARN(prev->next != next,
103342- "list_add_rcu corruption. prev->next should be next (%p), but was %p. (prev=%p).\n",
103343- next, prev->next, prev);
103344+ if (!__list_add_debug(new, prev, next))
103345+ return;
103346+
103347 new->next = next;
103348 new->prev = prev;
103349 rcu_assign_pointer(list_next_rcu(prev), new);
103350 next->prev = new;
103351 }
103352 EXPORT_SYMBOL(__list_add_rcu);
103353+#endif
103354+
103355+void __pax_list_add(struct list_head *new, struct list_head *prev, struct list_head *next)
103356+{
103357+#ifdef CONFIG_DEBUG_LIST
103358+ if (!__list_add_debug(new, prev, next))
103359+ return;
103360+#endif
103361+
103362+ pax_open_kernel();
103363+ next->prev = new;
103364+ new->next = next;
103365+ new->prev = prev;
103366+ prev->next = new;
103367+ pax_close_kernel();
103368+}
103369+EXPORT_SYMBOL(__pax_list_add);
103370+
103371+void pax_list_del(struct list_head *entry)
103372+{
103373+#ifdef CONFIG_DEBUG_LIST
103374+ if (!__list_del_entry_debug(entry))
103375+ return;
103376+#endif
103377+
103378+ pax_open_kernel();
103379+ __list_del(entry->prev, entry->next);
103380+ entry->next = LIST_POISON1;
103381+ entry->prev = LIST_POISON2;
103382+ pax_close_kernel();
103383+}
103384+EXPORT_SYMBOL(pax_list_del);
103385+
103386+void pax_list_del_init(struct list_head *entry)
103387+{
103388+ pax_open_kernel();
103389+ __list_del(entry->prev, entry->next);
103390+ INIT_LIST_HEAD(entry);
103391+ pax_close_kernel();
103392+}
103393+EXPORT_SYMBOL(pax_list_del_init);
103394+
103395+void __pax_list_add_rcu(struct list_head *new,
103396+ struct list_head *prev, struct list_head *next)
103397+{
103398+#ifdef CONFIG_DEBUG_LIST
103399+ if (!__list_add_debug(new, prev, next))
103400+ return;
103401+#endif
103402+
103403+ pax_open_kernel();
103404+ new->next = next;
103405+ new->prev = prev;
103406+ rcu_assign_pointer(list_next_rcu(prev), new);
103407+ next->prev = new;
103408+ pax_close_kernel();
103409+}
103410+EXPORT_SYMBOL(__pax_list_add_rcu);
103411+
103412+void pax_list_del_rcu(struct list_head *entry)
103413+{
103414+#ifdef CONFIG_DEBUG_LIST
103415+ if (!__list_del_entry_debug(entry))
103416+ return;
103417+#endif
103418+
103419+ pax_open_kernel();
103420+ __list_del(entry->prev, entry->next);
103421+ entry->next = LIST_POISON1;
103422+ entry->prev = LIST_POISON2;
103423+ pax_close_kernel();
103424+}
103425+EXPORT_SYMBOL(pax_list_del_rcu);
103426diff --git a/lib/lockref.c b/lib/lockref.c
103427index ecb9a66..a044fc5 100644
103428--- a/lib/lockref.c
103429+++ b/lib/lockref.c
103430@@ -48,13 +48,13 @@
103431 void lockref_get(struct lockref *lockref)
103432 {
103433 CMPXCHG_LOOP(
103434- new.count++;
103435+ __lockref_inc(&new);
103436 ,
103437 return;
103438 );
103439
103440 spin_lock(&lockref->lock);
103441- lockref->count++;
103442+ __lockref_inc(lockref);
103443 spin_unlock(&lockref->lock);
103444 }
103445 EXPORT_SYMBOL(lockref_get);
103446@@ -69,8 +69,8 @@ int lockref_get_not_zero(struct lockref *lockref)
103447 int retval;
103448
103449 CMPXCHG_LOOP(
103450- new.count++;
103451- if (old.count <= 0)
103452+ __lockref_inc(&new);
103453+ if (__lockref_read(&old) <= 0)
103454 return 0;
103455 ,
103456 return 1;
103457@@ -78,8 +78,8 @@ int lockref_get_not_zero(struct lockref *lockref)
103458
103459 spin_lock(&lockref->lock);
103460 retval = 0;
103461- if (lockref->count > 0) {
103462- lockref->count++;
103463+ if (__lockref_read(lockref) > 0) {
103464+ __lockref_inc(lockref);
103465 retval = 1;
103466 }
103467 spin_unlock(&lockref->lock);
103468@@ -96,17 +96,17 @@ EXPORT_SYMBOL(lockref_get_not_zero);
103469 int lockref_get_or_lock(struct lockref *lockref)
103470 {
103471 CMPXCHG_LOOP(
103472- new.count++;
103473- if (old.count <= 0)
103474+ __lockref_inc(&new);
103475+ if (__lockref_read(&old) <= 0)
103476 break;
103477 ,
103478 return 1;
103479 );
103480
103481 spin_lock(&lockref->lock);
103482- if (lockref->count <= 0)
103483+ if (__lockref_read(lockref) <= 0)
103484 return 0;
103485- lockref->count++;
103486+ __lockref_inc(lockref);
103487 spin_unlock(&lockref->lock);
103488 return 1;
103489 }
103490@@ -122,11 +122,11 @@ EXPORT_SYMBOL(lockref_get_or_lock);
103491 int lockref_put_return(struct lockref *lockref)
103492 {
103493 CMPXCHG_LOOP(
103494- new.count--;
103495- if (old.count <= 0)
103496+ __lockref_dec(&new);
103497+ if (__lockref_read(&old) <= 0)
103498 return -1;
103499 ,
103500- return new.count;
103501+ return __lockref_read(&new);
103502 );
103503 return -1;
103504 }
103505@@ -140,17 +140,17 @@ EXPORT_SYMBOL(lockref_put_return);
103506 int lockref_put_or_lock(struct lockref *lockref)
103507 {
103508 CMPXCHG_LOOP(
103509- new.count--;
103510- if (old.count <= 1)
103511+ __lockref_dec(&new);
103512+ if (__lockref_read(&old) <= 1)
103513 break;
103514 ,
103515 return 1;
103516 );
103517
103518 spin_lock(&lockref->lock);
103519- if (lockref->count <= 1)
103520+ if (__lockref_read(lockref) <= 1)
103521 return 0;
103522- lockref->count--;
103523+ __lockref_dec(lockref);
103524 spin_unlock(&lockref->lock);
103525 return 1;
103526 }
103527@@ -163,7 +163,7 @@ EXPORT_SYMBOL(lockref_put_or_lock);
103528 void lockref_mark_dead(struct lockref *lockref)
103529 {
103530 assert_spin_locked(&lockref->lock);
103531- lockref->count = -128;
103532+ __lockref_set(lockref, -128);
103533 }
103534 EXPORT_SYMBOL(lockref_mark_dead);
103535
103536@@ -177,8 +177,8 @@ int lockref_get_not_dead(struct lockref *lockref)
103537 int retval;
103538
103539 CMPXCHG_LOOP(
103540- new.count++;
103541- if (old.count < 0)
103542+ __lockref_inc(&new);
103543+ if (__lockref_read(&old) < 0)
103544 return 0;
103545 ,
103546 return 1;
103547@@ -186,8 +186,8 @@ int lockref_get_not_dead(struct lockref *lockref)
103548
103549 spin_lock(&lockref->lock);
103550 retval = 0;
103551- if (lockref->count >= 0) {
103552- lockref->count++;
103553+ if (__lockref_read(lockref) >= 0) {
103554+ __lockref_inc(lockref);
103555 retval = 1;
103556 }
103557 spin_unlock(&lockref->lock);
103558diff --git a/lib/nlattr.c b/lib/nlattr.c
103559index f5907d2..36072be 100644
103560--- a/lib/nlattr.c
103561+++ b/lib/nlattr.c
103562@@ -278,6 +278,8 @@ int nla_memcpy(void *dest, const struct nlattr *src, int count)
103563 {
103564 int minlen = min_t(int, count, nla_len(src));
103565
103566+ BUG_ON(minlen < 0);
103567+
103568 memcpy(dest, nla_data(src), minlen);
103569 if (count > minlen)
103570 memset(dest + minlen, 0, count - minlen);
103571diff --git a/lib/percpu-refcount.c b/lib/percpu-refcount.c
103572index 6111bcb..02e816b 100644
103573--- a/lib/percpu-refcount.c
103574+++ b/lib/percpu-refcount.c
103575@@ -31,7 +31,7 @@
103576 * atomic_long_t can't hit 0 before we've added up all the percpu refs.
103577 */
103578
103579-#define PERCPU_COUNT_BIAS (1LU << (BITS_PER_LONG - 1))
103580+#define PERCPU_COUNT_BIAS (1LU << (BITS_PER_LONG - 2))
103581
103582 static DECLARE_WAIT_QUEUE_HEAD(percpu_ref_switch_waitq);
103583
103584diff --git a/lib/radix-tree.c b/lib/radix-tree.c
103585index 3d2aa27..a472f20 100644
103586--- a/lib/radix-tree.c
103587+++ b/lib/radix-tree.c
103588@@ -67,7 +67,7 @@ struct radix_tree_preload {
103589 int nr;
103590 struct radix_tree_node *nodes[RADIX_TREE_PRELOAD_SIZE];
103591 };
103592-static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads) = { 0, };
103593+static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads);
103594
103595 static inline void *ptr_to_indirect(void *ptr)
103596 {
103597diff --git a/lib/random32.c b/lib/random32.c
103598index 0bee183..526f12f 100644
103599--- a/lib/random32.c
103600+++ b/lib/random32.c
103601@@ -47,7 +47,7 @@ static inline void prandom_state_selftest(void)
103602 }
103603 #endif
103604
103605-static DEFINE_PER_CPU(struct rnd_state, net_rand_state);
103606+static DEFINE_PER_CPU(struct rnd_state, net_rand_state) __latent_entropy;
103607
103608 /**
103609 * prandom_u32_state - seeded pseudo-random number generator.
103610diff --git a/lib/rbtree.c b/lib/rbtree.c
103611index c16c81a..4dcbda1 100644
103612--- a/lib/rbtree.c
103613+++ b/lib/rbtree.c
103614@@ -380,7 +380,9 @@ static inline void dummy_copy(struct rb_node *old, struct rb_node *new) {}
103615 static inline void dummy_rotate(struct rb_node *old, struct rb_node *new) {}
103616
103617 static const struct rb_augment_callbacks dummy_callbacks = {
103618- dummy_propagate, dummy_copy, dummy_rotate
103619+ .propagate = dummy_propagate,
103620+ .copy = dummy_copy,
103621+ .rotate = dummy_rotate
103622 };
103623
103624 void rb_insert_color(struct rb_node *node, struct rb_root *root)
103625diff --git a/lib/show_mem.c b/lib/show_mem.c
103626index adc98e18..0ce83c2 100644
103627--- a/lib/show_mem.c
103628+++ b/lib/show_mem.c
103629@@ -49,6 +49,6 @@ void show_mem(unsigned int filter)
103630 quicklist_total_size());
103631 #endif
103632 #ifdef CONFIG_MEMORY_FAILURE
103633- printk("%lu pages hwpoisoned\n", atomic_long_read(&num_poisoned_pages));
103634+ printk("%lu pages hwpoisoned\n", atomic_long_read_unchecked(&num_poisoned_pages));
103635 #endif
103636 }
103637diff --git a/lib/strncpy_from_user.c b/lib/strncpy_from_user.c
103638index e0af6ff..fcc9f15 100644
103639--- a/lib/strncpy_from_user.c
103640+++ b/lib/strncpy_from_user.c
103641@@ -22,7 +22,7 @@
103642 */
103643 static inline long do_strncpy_from_user(char *dst, const char __user *src, long count, unsigned long max)
103644 {
103645- const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
103646+ static const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
103647 long res = 0;
103648
103649 /*
103650diff --git a/lib/strnlen_user.c b/lib/strnlen_user.c
103651index a28df52..3d55877 100644
103652--- a/lib/strnlen_user.c
103653+++ b/lib/strnlen_user.c
103654@@ -26,7 +26,7 @@
103655 */
103656 static inline long do_strnlen_user(const char __user *src, unsigned long count, unsigned long max)
103657 {
103658- const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
103659+ static const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
103660 long align, res = 0;
103661 unsigned long c;
103662
103663diff --git a/lib/swiotlb.c b/lib/swiotlb.c
103664index 4abda07..b9d3765 100644
103665--- a/lib/swiotlb.c
103666+++ b/lib/swiotlb.c
103667@@ -682,7 +682,7 @@ EXPORT_SYMBOL(swiotlb_alloc_coherent);
103668
103669 void
103670 swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr,
103671- dma_addr_t dev_addr)
103672+ dma_addr_t dev_addr, struct dma_attrs *attrs)
103673 {
103674 phys_addr_t paddr = dma_to_phys(hwdev, dev_addr);
103675
103676diff --git a/lib/usercopy.c b/lib/usercopy.c
103677index 4f5b1dd..7cab418 100644
103678--- a/lib/usercopy.c
103679+++ b/lib/usercopy.c
103680@@ -7,3 +7,9 @@ void copy_from_user_overflow(void)
103681 WARN(1, "Buffer overflow detected!\n");
103682 }
103683 EXPORT_SYMBOL(copy_from_user_overflow);
103684+
103685+void copy_to_user_overflow(void)
103686+{
103687+ WARN(1, "Buffer overflow detected!\n");
103688+}
103689+EXPORT_SYMBOL(copy_to_user_overflow);
103690diff --git a/lib/vsprintf.c b/lib/vsprintf.c
103691index b235c96..343ffc1 100644
103692--- a/lib/vsprintf.c
103693+++ b/lib/vsprintf.c
103694@@ -16,6 +16,9 @@
103695 * - scnprintf and vscnprintf
103696 */
103697
103698+#ifdef CONFIG_GRKERNSEC_HIDESYM
103699+#define __INCLUDED_BY_HIDESYM 1
103700+#endif
103701 #include <stdarg.h>
103702 #include <linux/module.h> /* for KSYM_SYMBOL_LEN */
103703 #include <linux/types.h>
103704@@ -626,7 +629,7 @@ char *symbol_string(char *buf, char *end, void *ptr,
103705 #ifdef CONFIG_KALLSYMS
103706 if (*fmt == 'B')
103707 sprint_backtrace(sym, value);
103708- else if (*fmt != 'f' && *fmt != 's')
103709+ else if (*fmt != 'f' && *fmt != 's' && *fmt != 'X')
103710 sprint_symbol(sym, value);
103711 else
103712 sprint_symbol_no_offset(sym, value);
103713@@ -1322,7 +1325,11 @@ char *address_val(char *buf, char *end, const void *addr,
103714 return number(buf, end, num, spec);
103715 }
103716
103717+#ifdef CONFIG_GRKERNSEC_HIDESYM
103718+int kptr_restrict __read_mostly = 2;
103719+#else
103720 int kptr_restrict __read_mostly;
103721+#endif
103722
103723 /*
103724 * Show a '%p' thing. A kernel extension is that the '%p' is followed
103725@@ -1333,8 +1340,10 @@ int kptr_restrict __read_mostly;
103726 *
103727 * - 'F' For symbolic function descriptor pointers with offset
103728 * - 'f' For simple symbolic function names without offset
103729+ * - 'X' For simple symbolic function names without offset approved for use with GRKERNSEC_HIDESYM
103730 * - 'S' For symbolic direct pointers with offset
103731 * - 's' For symbolic direct pointers without offset
103732+ * - 'A' For symbolic direct pointers with offset approved for use with GRKERNSEC_HIDESYM
103733 * - '[FfSs]R' as above with __builtin_extract_return_addr() translation
103734 * - 'B' For backtraced symbolic direct pointers with offset
103735 * - 'R' For decoded struct resource, e.g., [mem 0x0-0x1f 64bit pref]
103736@@ -1417,12 +1426,12 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
103737
103738 if (!ptr && *fmt != 'K') {
103739 /*
103740- * Print (null) with the same width as a pointer so it makes
103741+ * Print (nil) with the same width as a pointer so it makes
103742 * tabular output look nice.
103743 */
103744 if (spec.field_width == -1)
103745 spec.field_width = default_width;
103746- return string(buf, end, "(null)", spec);
103747+ return string(buf, end, "(nil)", spec);
103748 }
103749
103750 switch (*fmt) {
103751@@ -1432,6 +1441,14 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
103752 /* Fallthrough */
103753 case 'S':
103754 case 's':
103755+#ifdef CONFIG_GRKERNSEC_HIDESYM
103756+ break;
103757+#else
103758+ return symbol_string(buf, end, ptr, spec, fmt);
103759+#endif
103760+ case 'X':
103761+ ptr = dereference_function_descriptor(ptr);
103762+ case 'A':
103763 case 'B':
103764 return symbol_string(buf, end, ptr, spec, fmt);
103765 case 'R':
103766@@ -1496,6 +1513,8 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
103767 va_end(va);
103768 return buf;
103769 }
103770+ case 'P':
103771+ break;
103772 case 'K':
103773 /*
103774 * %pK cannot be used in IRQ context because its test
103775@@ -1553,6 +1572,22 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
103776 ((const struct file *)ptr)->f_path.dentry,
103777 spec, fmt);
103778 }
103779+
103780+#ifdef CONFIG_GRKERNSEC_HIDESYM
103781+ /* 'P' = approved pointers to copy to userland,
103782+ as in the /proc/kallsyms case, as we make it display nothing
103783+ for non-root users, and the real contents for root users
103784+ 'X' = approved simple symbols
103785+ Also ignore 'K' pointers, since we force their NULLing for non-root users
103786+ above
103787+ */
103788+ if ((unsigned long)ptr > TASK_SIZE && *fmt != 'P' && *fmt != 'X' && *fmt != 'K' && is_usercopy_object(buf)) {
103789+ printk(KERN_ALERT "grsec: kernel infoleak detected! Please report this log to spender@grsecurity.net.\n");
103790+ dump_stack();
103791+ ptr = NULL;
103792+ }
103793+#endif
103794+
103795 spec.flags |= SMALL;
103796 if (spec.field_width == -1) {
103797 spec.field_width = default_width;
103798@@ -2254,11 +2289,11 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
103799 typeof(type) value; \
103800 if (sizeof(type) == 8) { \
103801 args = PTR_ALIGN(args, sizeof(u32)); \
103802- *(u32 *)&value = *(u32 *)args; \
103803- *((u32 *)&value + 1) = *(u32 *)(args + 4); \
103804+ *(u32 *)&value = *(const u32 *)args; \
103805+ *((u32 *)&value + 1) = *(const u32 *)(args + 4); \
103806 } else { \
103807 args = PTR_ALIGN(args, sizeof(type)); \
103808- value = *(typeof(type) *)args; \
103809+ value = *(const typeof(type) *)args; \
103810 } \
103811 args += sizeof(type); \
103812 value; \
103813@@ -2321,7 +2356,7 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
103814 case FORMAT_TYPE_STR: {
103815 const char *str_arg = args;
103816 args += strlen(str_arg) + 1;
103817- str = string(str, end, (char *)str_arg, spec);
103818+ str = string(str, end, str_arg, spec);
103819 break;
103820 }
103821
103822diff --git a/localversion-grsec b/localversion-grsec
103823new file mode 100644
103824index 0000000..7cd6065
103825--- /dev/null
103826+++ b/localversion-grsec
103827@@ -0,0 +1 @@
103828+-grsec
103829diff --git a/mm/Kconfig b/mm/Kconfig
103830index a03131b..1b1bafb 100644
103831--- a/mm/Kconfig
103832+++ b/mm/Kconfig
103833@@ -342,10 +342,11 @@ config KSM
103834 root has set /sys/kernel/mm/ksm/run to 1 (if CONFIG_SYSFS is set).
103835
103836 config DEFAULT_MMAP_MIN_ADDR
103837- int "Low address space to protect from user allocation"
103838+ int "Low address space to protect from user allocation"
103839 depends on MMU
103840- default 4096
103841- help
103842+ default 32768 if ALPHA || ARM || PARISC || SPARC32
103843+ default 65536
103844+ help
103845 This is the portion of low virtual memory which should be protected
103846 from userspace allocation. Keeping a user from writing to low pages
103847 can help reduce the impact of kernel NULL pointer bugs.
103848@@ -376,7 +377,7 @@ config MEMORY_FAILURE
103849
103850 config HWPOISON_INJECT
103851 tristate "HWPoison pages injector"
103852- depends on MEMORY_FAILURE && DEBUG_KERNEL && PROC_FS
103853+ depends on MEMORY_FAILURE && DEBUG_KERNEL && PROC_FS && !GRKERNSEC
103854 select PROC_PAGE_MONITOR
103855
103856 config NOMMU_INITIAL_TRIM_EXCESS
103857diff --git a/mm/Kconfig.debug b/mm/Kconfig.debug
103858index 957d3da..1d34e20 100644
103859--- a/mm/Kconfig.debug
103860+++ b/mm/Kconfig.debug
103861@@ -10,6 +10,7 @@ config PAGE_EXTENSION
103862 config DEBUG_PAGEALLOC
103863 bool "Debug page memory allocations"
103864 depends on DEBUG_KERNEL
103865+ depends on !PAX_MEMORY_SANITIZE
103866 depends on !HIBERNATION || ARCH_SUPPORTS_DEBUG_PAGEALLOC && !PPC && !SPARC
103867 depends on !KMEMCHECK
103868 select PAGE_EXTENSION
103869diff --git a/mm/backing-dev.c b/mm/backing-dev.c
103870index 6dc4580..e031ec1 100644
103871--- a/mm/backing-dev.c
103872+++ b/mm/backing-dev.c
103873@@ -12,7 +12,7 @@
103874 #include <linux/device.h>
103875 #include <trace/events/writeback.h>
103876
103877-static atomic_long_t bdi_seq = ATOMIC_LONG_INIT(0);
103878+static atomic_long_unchecked_t bdi_seq = ATOMIC_LONG_INIT(0);
103879
103880 struct backing_dev_info noop_backing_dev_info = {
103881 .name = "noop",
103882@@ -474,7 +474,7 @@ int bdi_setup_and_register(struct backing_dev_info *bdi, char *name)
103883 return err;
103884
103885 err = bdi_register(bdi, NULL, "%.28s-%ld", name,
103886- atomic_long_inc_return(&bdi_seq));
103887+ atomic_long_inc_return_unchecked(&bdi_seq));
103888 if (err) {
103889 bdi_destroy(bdi);
103890 return err;
103891diff --git a/mm/filemap.c b/mm/filemap.c
103892index ad72420..0a20ef2 100644
103893--- a/mm/filemap.c
103894+++ b/mm/filemap.c
103895@@ -2097,7 +2097,7 @@ int generic_file_mmap(struct file * file, struct vm_area_struct * vma)
103896 struct address_space *mapping = file->f_mapping;
103897
103898 if (!mapping->a_ops->readpage)
103899- return -ENOEXEC;
103900+ return -ENODEV;
103901 file_accessed(file);
103902 vma->vm_ops = &generic_file_vm_ops;
103903 return 0;
103904@@ -2275,6 +2275,7 @@ inline int generic_write_checks(struct file *file, loff_t *pos, size_t *count, i
103905 *pos = i_size_read(inode);
103906
103907 if (limit != RLIM_INFINITY) {
103908+ gr_learn_resource(current, RLIMIT_FSIZE,*pos, 0);
103909 if (*pos >= limit) {
103910 send_sig(SIGXFSZ, current, 0);
103911 return -EFBIG;
103912diff --git a/mm/gup.c b/mm/gup.c
103913index a6e24e2..72dd2cf 100644
103914--- a/mm/gup.c
103915+++ b/mm/gup.c
103916@@ -265,11 +265,6 @@ static int faultin_page(struct task_struct *tsk, struct vm_area_struct *vma,
103917 unsigned int fault_flags = 0;
103918 int ret;
103919
103920- /* For mlock, just skip the stack guard page. */
103921- if ((*flags & FOLL_MLOCK) &&
103922- (stack_guard_page_start(vma, address) ||
103923- stack_guard_page_end(vma, address + PAGE_SIZE)))
103924- return -ENOENT;
103925 if (*flags & FOLL_WRITE)
103926 fault_flags |= FAULT_FLAG_WRITE;
103927 if (nonblocking)
103928@@ -435,14 +430,14 @@ long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
103929 if (!(gup_flags & FOLL_FORCE))
103930 gup_flags |= FOLL_NUMA;
103931
103932- do {
103933+ while (nr_pages) {
103934 struct page *page;
103935 unsigned int foll_flags = gup_flags;
103936 unsigned int page_increm;
103937
103938 /* first iteration or cross vma bound */
103939 if (!vma || start >= vma->vm_end) {
103940- vma = find_extend_vma(mm, start);
103941+ vma = find_vma(mm, start);
103942 if (!vma && in_gate_area(mm, start)) {
103943 int ret;
103944 ret = get_gate_page(mm, start & PAGE_MASK,
103945@@ -454,7 +449,7 @@ long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
103946 goto next_page;
103947 }
103948
103949- if (!vma || check_vma_flags(vma, gup_flags))
103950+ if (!vma || start < vma->vm_start || check_vma_flags(vma, gup_flags))
103951 return i ? : -EFAULT;
103952 if (is_vm_hugetlb_page(vma)) {
103953 i = follow_hugetlb_page(mm, vma, pages, vmas,
103954@@ -509,7 +504,7 @@ next_page:
103955 i += page_increm;
103956 start += page_increm * PAGE_SIZE;
103957 nr_pages -= page_increm;
103958- } while (nr_pages);
103959+ }
103960 return i;
103961 }
103962 EXPORT_SYMBOL(__get_user_pages);
103963diff --git a/mm/highmem.c b/mm/highmem.c
103964index 123bcd3..0de52ba 100644
103965--- a/mm/highmem.c
103966+++ b/mm/highmem.c
103967@@ -195,8 +195,9 @@ static void flush_all_zero_pkmaps(void)
103968 * So no dangers, even with speculative execution.
103969 */
103970 page = pte_page(pkmap_page_table[i]);
103971+ pax_open_kernel();
103972 pte_clear(&init_mm, PKMAP_ADDR(i), &pkmap_page_table[i]);
103973-
103974+ pax_close_kernel();
103975 set_page_address(page, NULL);
103976 need_flush = 1;
103977 }
103978@@ -259,9 +260,11 @@ start:
103979 }
103980 }
103981 vaddr = PKMAP_ADDR(last_pkmap_nr);
103982+
103983+ pax_open_kernel();
103984 set_pte_at(&init_mm, vaddr,
103985 &(pkmap_page_table[last_pkmap_nr]), mk_pte(page, kmap_prot));
103986-
103987+ pax_close_kernel();
103988 pkmap_count[last_pkmap_nr] = 1;
103989 set_page_address(page, (void *)vaddr);
103990
103991diff --git a/mm/hugetlb.c b/mm/hugetlb.c
103992index caad3c5..4f68807 100644
103993--- a/mm/hugetlb.c
103994+++ b/mm/hugetlb.c
103995@@ -2260,6 +2260,7 @@ static int hugetlb_sysctl_handler_common(bool obey_mempolicy,
103996 struct ctl_table *table, int write,
103997 void __user *buffer, size_t *length, loff_t *ppos)
103998 {
103999+ ctl_table_no_const t;
104000 struct hstate *h = &default_hstate;
104001 unsigned long tmp = h->max_huge_pages;
104002 int ret;
104003@@ -2267,9 +2268,10 @@ static int hugetlb_sysctl_handler_common(bool obey_mempolicy,
104004 if (!hugepages_supported())
104005 return -ENOTSUPP;
104006
104007- table->data = &tmp;
104008- table->maxlen = sizeof(unsigned long);
104009- ret = proc_doulongvec_minmax(table, write, buffer, length, ppos);
104010+ t = *table;
104011+ t.data = &tmp;
104012+ t.maxlen = sizeof(unsigned long);
104013+ ret = proc_doulongvec_minmax(&t, write, buffer, length, ppos);
104014 if (ret)
104015 goto out;
104016
104017@@ -2304,6 +2306,7 @@ int hugetlb_overcommit_handler(struct ctl_table *table, int write,
104018 struct hstate *h = &default_hstate;
104019 unsigned long tmp;
104020 int ret;
104021+ ctl_table_no_const hugetlb_table;
104022
104023 if (!hugepages_supported())
104024 return -ENOTSUPP;
104025@@ -2313,9 +2316,10 @@ int hugetlb_overcommit_handler(struct ctl_table *table, int write,
104026 if (write && hstate_is_gigantic(h))
104027 return -EINVAL;
104028
104029- table->data = &tmp;
104030- table->maxlen = sizeof(unsigned long);
104031- ret = proc_doulongvec_minmax(table, write, buffer, length, ppos);
104032+ hugetlb_table = *table;
104033+ hugetlb_table.data = &tmp;
104034+ hugetlb_table.maxlen = sizeof(unsigned long);
104035+ ret = proc_doulongvec_minmax(&hugetlb_table, write, buffer, length, ppos);
104036 if (ret)
104037 goto out;
104038
104039@@ -2800,6 +2804,27 @@ static void unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
104040 i_mmap_unlock_write(mapping);
104041 }
104042
104043+#ifdef CONFIG_PAX_SEGMEXEC
104044+static void pax_mirror_huge_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m)
104045+{
104046+ struct mm_struct *mm = vma->vm_mm;
104047+ struct vm_area_struct *vma_m;
104048+ unsigned long address_m;
104049+ pte_t *ptep_m;
104050+
104051+ vma_m = pax_find_mirror_vma(vma);
104052+ if (!vma_m)
104053+ return;
104054+
104055+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
104056+ address_m = address + SEGMEXEC_TASK_SIZE;
104057+ ptep_m = huge_pte_offset(mm, address_m & HPAGE_MASK);
104058+ get_page(page_m);
104059+ hugepage_add_anon_rmap(page_m, vma_m, address_m);
104060+ set_huge_pte_at(mm, address_m, ptep_m, make_huge_pte(vma_m, page_m, 0));
104061+}
104062+#endif
104063+
104064 /*
104065 * Hugetlb_cow() should be called with page lock of the original hugepage held.
104066 * Called with hugetlb_instantiation_mutex held and pte_page locked so we
104067@@ -2912,6 +2937,11 @@ retry_avoidcopy:
104068 make_huge_pte(vma, new_page, 1));
104069 page_remove_rmap(old_page);
104070 hugepage_add_new_anon_rmap(new_page, vma, address);
104071+
104072+#ifdef CONFIG_PAX_SEGMEXEC
104073+ pax_mirror_huge_pte(vma, address, new_page);
104074+#endif
104075+
104076 /* Make the old page be freed below */
104077 new_page = old_page;
104078 }
104079@@ -3072,6 +3102,10 @@ retry:
104080 && (vma->vm_flags & VM_SHARED)));
104081 set_huge_pte_at(mm, address, ptep, new_pte);
104082
104083+#ifdef CONFIG_PAX_SEGMEXEC
104084+ pax_mirror_huge_pte(vma, address, page);
104085+#endif
104086+
104087 if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
104088 /* Optimization, do the COW without a second fault */
104089 ret = hugetlb_cow(mm, vma, address, ptep, new_pte, page, ptl);
104090@@ -3139,6 +3173,10 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
104091 struct address_space *mapping;
104092 int need_wait_lock = 0;
104093
104094+#ifdef CONFIG_PAX_SEGMEXEC
104095+ struct vm_area_struct *vma_m;
104096+#endif
104097+
104098 address &= huge_page_mask(h);
104099
104100 ptep = huge_pte_offset(mm, address);
104101@@ -3152,6 +3190,26 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
104102 VM_FAULT_SET_HINDEX(hstate_index(h));
104103 }
104104
104105+#ifdef CONFIG_PAX_SEGMEXEC
104106+ vma_m = pax_find_mirror_vma(vma);
104107+ if (vma_m) {
104108+ unsigned long address_m;
104109+
104110+ if (vma->vm_start > vma_m->vm_start) {
104111+ address_m = address;
104112+ address -= SEGMEXEC_TASK_SIZE;
104113+ vma = vma_m;
104114+ h = hstate_vma(vma);
104115+ } else
104116+ address_m = address + SEGMEXEC_TASK_SIZE;
104117+
104118+ if (!huge_pte_alloc(mm, address_m, huge_page_size(h)))
104119+ return VM_FAULT_OOM;
104120+ address_m &= HPAGE_MASK;
104121+ unmap_hugepage_range(vma, address_m, address_m + HPAGE_SIZE, NULL);
104122+ }
104123+#endif
104124+
104125 ptep = huge_pte_alloc(mm, address, huge_page_size(h));
104126 if (!ptep)
104127 return VM_FAULT_OOM;
104128diff --git a/mm/internal.h b/mm/internal.h
104129index a96da5b..42ebd54 100644
104130--- a/mm/internal.h
104131+++ b/mm/internal.h
104132@@ -156,6 +156,7 @@ __find_buddy_index(unsigned long page_idx, unsigned int order)
104133
104134 extern int __isolate_free_page(struct page *page, unsigned int order);
104135 extern void __free_pages_bootmem(struct page *page, unsigned int order);
104136+extern void free_compound_page(struct page *page);
104137 extern void prep_compound_page(struct page *page, unsigned long order);
104138 #ifdef CONFIG_MEMORY_FAILURE
104139 extern bool is_free_buddy_page(struct page *page);
104140@@ -411,7 +412,7 @@ extern u32 hwpoison_filter_enable;
104141
104142 extern unsigned long vm_mmap_pgoff(struct file *, unsigned long,
104143 unsigned long, unsigned long,
104144- unsigned long, unsigned long);
104145+ unsigned long, unsigned long) __intentional_overflow(-1);
104146
104147 extern void set_pageblock_order(void);
104148 unsigned long reclaim_clean_pages_from_list(struct zone *zone,
104149diff --git a/mm/kmemleak.c b/mm/kmemleak.c
104150index 5405aff..483406d 100644
104151--- a/mm/kmemleak.c
104152+++ b/mm/kmemleak.c
104153@@ -365,7 +365,7 @@ static void print_unreferenced(struct seq_file *seq,
104154
104155 for (i = 0; i < object->trace_len; i++) {
104156 void *ptr = (void *)object->trace[i];
104157- seq_printf(seq, " [<%p>] %pS\n", ptr, ptr);
104158+ seq_printf(seq, " [<%pP>] %pA\n", ptr, ptr);
104159 }
104160 }
104161
104162@@ -1911,7 +1911,7 @@ static int __init kmemleak_late_init(void)
104163 return -ENOMEM;
104164 }
104165
104166- dentry = debugfs_create_file("kmemleak", S_IRUGO, NULL, NULL,
104167+ dentry = debugfs_create_file("kmemleak", S_IRUSR, NULL, NULL,
104168 &kmemleak_fops);
104169 if (!dentry)
104170 pr_warning("Failed to create the debugfs kmemleak file\n");
104171diff --git a/mm/maccess.c b/mm/maccess.c
104172index d53adf9..03a24bf 100644
104173--- a/mm/maccess.c
104174+++ b/mm/maccess.c
104175@@ -26,7 +26,7 @@ long __probe_kernel_read(void *dst, const void *src, size_t size)
104176 set_fs(KERNEL_DS);
104177 pagefault_disable();
104178 ret = __copy_from_user_inatomic(dst,
104179- (__force const void __user *)src, size);
104180+ (const void __force_user *)src, size);
104181 pagefault_enable();
104182 set_fs(old_fs);
104183
104184@@ -53,7 +53,7 @@ long __probe_kernel_write(void *dst, const void *src, size_t size)
104185
104186 set_fs(KERNEL_DS);
104187 pagefault_disable();
104188- ret = __copy_to_user_inatomic((__force void __user *)dst, src, size);
104189+ ret = __copy_to_user_inatomic((void __force_user *)dst, src, size);
104190 pagefault_enable();
104191 set_fs(old_fs);
104192
104193diff --git a/mm/madvise.c b/mm/madvise.c
104194index d551475..8fdd7f3 100644
104195--- a/mm/madvise.c
104196+++ b/mm/madvise.c
104197@@ -51,6 +51,10 @@ static long madvise_behavior(struct vm_area_struct *vma,
104198 pgoff_t pgoff;
104199 unsigned long new_flags = vma->vm_flags;
104200
104201+#ifdef CONFIG_PAX_SEGMEXEC
104202+ struct vm_area_struct *vma_m;
104203+#endif
104204+
104205 switch (behavior) {
104206 case MADV_NORMAL:
104207 new_flags = new_flags & ~VM_RAND_READ & ~VM_SEQ_READ;
104208@@ -126,6 +130,13 @@ success:
104209 /*
104210 * vm_flags is protected by the mmap_sem held in write mode.
104211 */
104212+
104213+#ifdef CONFIG_PAX_SEGMEXEC
104214+ vma_m = pax_find_mirror_vma(vma);
104215+ if (vma_m)
104216+ vma_m->vm_flags = new_flags & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT);
104217+#endif
104218+
104219 vma->vm_flags = new_flags;
104220
104221 out:
104222@@ -277,11 +288,27 @@ static long madvise_dontneed(struct vm_area_struct *vma,
104223 struct vm_area_struct **prev,
104224 unsigned long start, unsigned long end)
104225 {
104226+
104227+#ifdef CONFIG_PAX_SEGMEXEC
104228+ struct vm_area_struct *vma_m;
104229+#endif
104230+
104231 *prev = vma;
104232 if (vma->vm_flags & (VM_LOCKED|VM_HUGETLB|VM_PFNMAP))
104233 return -EINVAL;
104234
104235 zap_page_range(vma, start, end - start, NULL);
104236+
104237+#ifdef CONFIG_PAX_SEGMEXEC
104238+ vma_m = pax_find_mirror_vma(vma);
104239+ if (vma_m) {
104240+ if (vma_m->vm_flags & (VM_LOCKED|VM_HUGETLB|VM_PFNMAP))
104241+ return -EINVAL;
104242+
104243+ zap_page_range(vma_m, start + SEGMEXEC_TASK_SIZE, end - start, NULL);
104244+ }
104245+#endif
104246+
104247 return 0;
104248 }
104249
104250@@ -484,6 +511,16 @@ SYSCALL_DEFINE3(madvise, unsigned long, start, size_t, len_in, int, behavior)
104251 if (end < start)
104252 return error;
104253
104254+#ifdef CONFIG_PAX_SEGMEXEC
104255+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
104256+ if (end > SEGMEXEC_TASK_SIZE)
104257+ return error;
104258+ } else
104259+#endif
104260+
104261+ if (end > TASK_SIZE)
104262+ return error;
104263+
104264 error = 0;
104265 if (end == start)
104266 return error;
104267diff --git a/mm/memory-failure.c b/mm/memory-failure.c
104268index 72a5224..51ba846 100644
104269--- a/mm/memory-failure.c
104270+++ b/mm/memory-failure.c
104271@@ -61,7 +61,7 @@ int sysctl_memory_failure_early_kill __read_mostly = 0;
104272
104273 int sysctl_memory_failure_recovery __read_mostly = 1;
104274
104275-atomic_long_t num_poisoned_pages __read_mostly = ATOMIC_LONG_INIT(0);
104276+atomic_long_unchecked_t num_poisoned_pages __read_mostly = ATOMIC_LONG_INIT(0);
104277
104278 #if defined(CONFIG_HWPOISON_INJECT) || defined(CONFIG_HWPOISON_INJECT_MODULE)
104279
104280@@ -198,7 +198,7 @@ static int kill_proc(struct task_struct *t, unsigned long addr, int trapno,
104281 pfn, t->comm, t->pid);
104282 si.si_signo = SIGBUS;
104283 si.si_errno = 0;
104284- si.si_addr = (void *)addr;
104285+ si.si_addr = (void __user *)addr;
104286 #ifdef __ARCH_SI_TRAPNO
104287 si.si_trapno = trapno;
104288 #endif
104289@@ -779,7 +779,7 @@ static struct page_state {
104290 unsigned long res;
104291 char *msg;
104292 int (*action)(struct page *p, unsigned long pfn);
104293-} error_states[] = {
104294+} __do_const error_states[] = {
104295 { reserved, reserved, "reserved kernel", me_kernel },
104296 /*
104297 * free pages are specially detected outside this table:
104298@@ -1087,7 +1087,7 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
104299 nr_pages = 1 << compound_order(hpage);
104300 else /* normal page or thp */
104301 nr_pages = 1;
104302- atomic_long_add(nr_pages, &num_poisoned_pages);
104303+ atomic_long_add_unchecked(nr_pages, &num_poisoned_pages);
104304
104305 /*
104306 * We need/can do nothing about count=0 pages.
104307@@ -1116,7 +1116,7 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
104308 if (PageHWPoison(hpage)) {
104309 if ((hwpoison_filter(p) && TestClearPageHWPoison(p))
104310 || (p != hpage && TestSetPageHWPoison(hpage))) {
104311- atomic_long_sub(nr_pages, &num_poisoned_pages);
104312+ atomic_long_sub_unchecked(nr_pages, &num_poisoned_pages);
104313 unlock_page(hpage);
104314 return 0;
104315 }
104316@@ -1184,14 +1184,14 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
104317 */
104318 if (!PageHWPoison(p)) {
104319 printk(KERN_ERR "MCE %#lx: just unpoisoned\n", pfn);
104320- atomic_long_sub(nr_pages, &num_poisoned_pages);
104321+ atomic_long_sub_unchecked(nr_pages, &num_poisoned_pages);
104322 put_page(hpage);
104323 res = 0;
104324 goto out;
104325 }
104326 if (hwpoison_filter(p)) {
104327 if (TestClearPageHWPoison(p))
104328- atomic_long_sub(nr_pages, &num_poisoned_pages);
104329+ atomic_long_sub_unchecked(nr_pages, &num_poisoned_pages);
104330 unlock_page(hpage);
104331 put_page(hpage);
104332 return 0;
104333@@ -1421,7 +1421,7 @@ int unpoison_memory(unsigned long pfn)
104334 return 0;
104335 }
104336 if (TestClearPageHWPoison(p))
104337- atomic_long_dec(&num_poisoned_pages);
104338+ atomic_long_dec_unchecked(&num_poisoned_pages);
104339 pr_info("MCE: Software-unpoisoned free page %#lx\n", pfn);
104340 return 0;
104341 }
104342@@ -1435,7 +1435,7 @@ int unpoison_memory(unsigned long pfn)
104343 */
104344 if (TestClearPageHWPoison(page)) {
104345 pr_info("MCE: Software-unpoisoned page %#lx\n", pfn);
104346- atomic_long_sub(nr_pages, &num_poisoned_pages);
104347+ atomic_long_sub_unchecked(nr_pages, &num_poisoned_pages);
104348 freeit = 1;
104349 if (PageHuge(page))
104350 clear_page_hwpoison_huge_page(page);
104351@@ -1560,11 +1560,11 @@ static int soft_offline_huge_page(struct page *page, int flags)
104352 if (PageHuge(page)) {
104353 set_page_hwpoison_huge_page(hpage);
104354 dequeue_hwpoisoned_huge_page(hpage);
104355- atomic_long_add(1 << compound_order(hpage),
104356+ atomic_long_add_unchecked(1 << compound_order(hpage),
104357 &num_poisoned_pages);
104358 } else {
104359 SetPageHWPoison(page);
104360- atomic_long_inc(&num_poisoned_pages);
104361+ atomic_long_inc_unchecked(&num_poisoned_pages);
104362 }
104363 }
104364 return ret;
104365@@ -1603,7 +1603,7 @@ static int __soft_offline_page(struct page *page, int flags)
104366 put_page(page);
104367 pr_info("soft_offline: %#lx: invalidated\n", pfn);
104368 SetPageHWPoison(page);
104369- atomic_long_inc(&num_poisoned_pages);
104370+ atomic_long_inc_unchecked(&num_poisoned_pages);
104371 return 0;
104372 }
104373
104374@@ -1652,7 +1652,7 @@ static int __soft_offline_page(struct page *page, int flags)
104375 if (!is_free_buddy_page(page))
104376 pr_info("soft offline: %#lx: page leaked\n",
104377 pfn);
104378- atomic_long_inc(&num_poisoned_pages);
104379+ atomic_long_inc_unchecked(&num_poisoned_pages);
104380 }
104381 } else {
104382 pr_info("soft offline: %#lx: isolation failed: %d, page count %d, type %lx\n",
104383@@ -1722,11 +1722,11 @@ int soft_offline_page(struct page *page, int flags)
104384 if (PageHuge(page)) {
104385 set_page_hwpoison_huge_page(hpage);
104386 if (!dequeue_hwpoisoned_huge_page(hpage))
104387- atomic_long_add(1 << compound_order(hpage),
104388+ atomic_long_add_unchecked(1 << compound_order(hpage),
104389 &num_poisoned_pages);
104390 } else {
104391 if (!TestSetPageHWPoison(page))
104392- atomic_long_inc(&num_poisoned_pages);
104393+ atomic_long_inc_unchecked(&num_poisoned_pages);
104394 }
104395 }
104396 unset_migratetype_isolate(page, MIGRATE_MOVABLE);
104397diff --git a/mm/memory.c b/mm/memory.c
104398index 97839f5..4bc5530 100644
104399--- a/mm/memory.c
104400+++ b/mm/memory.c
104401@@ -414,6 +414,7 @@ static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
104402 free_pte_range(tlb, pmd, addr);
104403 } while (pmd++, addr = next, addr != end);
104404
104405+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_PER_CPU_PGD)
104406 start &= PUD_MASK;
104407 if (start < floor)
104408 return;
104409@@ -429,6 +430,7 @@ static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
104410 pud_clear(pud);
104411 pmd_free_tlb(tlb, pmd, start);
104412 mm_dec_nr_pmds(tlb->mm);
104413+#endif
104414 }
104415
104416 static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
104417@@ -448,6 +450,7 @@ static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
104418 free_pmd_range(tlb, pud, addr, next, floor, ceiling);
104419 } while (pud++, addr = next, addr != end);
104420
104421+#if !defined(CONFIG_X86_64) || !defined(CONFIG_PAX_PER_CPU_PGD)
104422 start &= PGDIR_MASK;
104423 if (start < floor)
104424 return;
104425@@ -462,6 +465,8 @@ static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
104426 pud = pud_offset(pgd, start);
104427 pgd_clear(pgd);
104428 pud_free_tlb(tlb, pud, start);
104429+#endif
104430+
104431 }
104432
104433 /*
104434@@ -691,10 +696,10 @@ static void print_bad_pte(struct vm_area_struct *vma, unsigned long addr,
104435 * Choose text because data symbols depend on CONFIG_KALLSYMS_ALL=y
104436 */
104437 if (vma->vm_ops)
104438- printk(KERN_ALERT "vma->vm_ops->fault: %pSR\n",
104439+ printk(KERN_ALERT "vma->vm_ops->fault: %pAR\n",
104440 vma->vm_ops->fault);
104441 if (vma->vm_file)
104442- printk(KERN_ALERT "vma->vm_file->f_op->mmap: %pSR\n",
104443+ printk(KERN_ALERT "vma->vm_file->f_op->mmap: %pAR\n",
104444 vma->vm_file->f_op->mmap);
104445 dump_stack();
104446 add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
104447@@ -1464,6 +1469,10 @@ static int insert_page(struct vm_area_struct *vma, unsigned long addr,
104448 page_add_file_rmap(page);
104449 set_pte_at(mm, addr, pte, mk_pte(page, prot));
104450
104451+#ifdef CONFIG_PAX_SEGMEXEC
104452+ pax_mirror_file_pte(vma, addr, page, ptl);
104453+#endif
104454+
104455 retval = 0;
104456 pte_unmap_unlock(pte, ptl);
104457 return retval;
104458@@ -1508,9 +1517,21 @@ int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
104459 if (!page_count(page))
104460 return -EINVAL;
104461 if (!(vma->vm_flags & VM_MIXEDMAP)) {
104462+
104463+#ifdef CONFIG_PAX_SEGMEXEC
104464+ struct vm_area_struct *vma_m;
104465+#endif
104466+
104467 BUG_ON(down_read_trylock(&vma->vm_mm->mmap_sem));
104468 BUG_ON(vma->vm_flags & VM_PFNMAP);
104469 vma->vm_flags |= VM_MIXEDMAP;
104470+
104471+#ifdef CONFIG_PAX_SEGMEXEC
104472+ vma_m = pax_find_mirror_vma(vma);
104473+ if (vma_m)
104474+ vma_m->vm_flags |= VM_MIXEDMAP;
104475+#endif
104476+
104477 }
104478 return insert_page(vma, addr, page, vma->vm_page_prot);
104479 }
104480@@ -1593,6 +1614,7 @@ int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
104481 unsigned long pfn)
104482 {
104483 BUG_ON(!(vma->vm_flags & VM_MIXEDMAP));
104484+ BUG_ON(vma->vm_mirror);
104485
104486 if (addr < vma->vm_start || addr >= vma->vm_end)
104487 return -EFAULT;
104488@@ -1840,7 +1862,9 @@ static int apply_to_pmd_range(struct mm_struct *mm, pud_t *pud,
104489
104490 BUG_ON(pud_huge(*pud));
104491
104492- pmd = pmd_alloc(mm, pud, addr);
104493+ pmd = (mm == &init_mm) ?
104494+ pmd_alloc_kernel(mm, pud, addr) :
104495+ pmd_alloc(mm, pud, addr);
104496 if (!pmd)
104497 return -ENOMEM;
104498 do {
104499@@ -1860,7 +1884,9 @@ static int apply_to_pud_range(struct mm_struct *mm, pgd_t *pgd,
104500 unsigned long next;
104501 int err;
104502
104503- pud = pud_alloc(mm, pgd, addr);
104504+ pud = (mm == &init_mm) ?
104505+ pud_alloc_kernel(mm, pgd, addr) :
104506+ pud_alloc(mm, pgd, addr);
104507 if (!pud)
104508 return -ENOMEM;
104509 do {
104510@@ -1982,6 +2008,185 @@ static int do_page_mkwrite(struct vm_area_struct *vma, struct page *page,
104511 return ret;
104512 }
104513
104514+#ifdef CONFIG_PAX_SEGMEXEC
104515+static void pax_unmap_mirror_pte(struct vm_area_struct *vma, unsigned long address, pmd_t *pmd)
104516+{
104517+ struct mm_struct *mm = vma->vm_mm;
104518+ spinlock_t *ptl;
104519+ pte_t *pte, entry;
104520+
104521+ pte = pte_offset_map_lock(mm, pmd, address, &ptl);
104522+ entry = *pte;
104523+ if (!pte_present(entry)) {
104524+ if (!pte_none(entry)) {
104525+ free_swap_and_cache(pte_to_swp_entry(entry));
104526+ pte_clear_not_present_full(mm, address, pte, 0);
104527+ }
104528+ } else {
104529+ struct page *page;
104530+
104531+ flush_cache_page(vma, address, pte_pfn(entry));
104532+ entry = ptep_clear_flush(vma, address, pte);
104533+ BUG_ON(pte_dirty(entry));
104534+ page = vm_normal_page(vma, address, entry);
104535+ if (page) {
104536+ update_hiwater_rss(mm);
104537+ if (PageAnon(page))
104538+ dec_mm_counter_fast(mm, MM_ANONPAGES);
104539+ else
104540+ dec_mm_counter_fast(mm, MM_FILEPAGES);
104541+ page_remove_rmap(page);
104542+ page_cache_release(page);
104543+ }
104544+ }
104545+ pte_unmap_unlock(pte, ptl);
104546+}
104547+
104548+/* PaX: if vma is mirrored, synchronize the mirror's PTE
104549+ *
104550+ * the ptl of the lower mapped page is held on entry and is not released on exit
104551+ * or inside to ensure atomic changes to the PTE states (swapout, mremap, munmap, etc)
104552+ */
104553+static void pax_mirror_anon_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
104554+{
104555+ struct mm_struct *mm = vma->vm_mm;
104556+ unsigned long address_m;
104557+ spinlock_t *ptl_m;
104558+ struct vm_area_struct *vma_m;
104559+ pmd_t *pmd_m;
104560+ pte_t *pte_m, entry_m;
104561+
104562+ BUG_ON(!page_m || !PageAnon(page_m));
104563+
104564+ vma_m = pax_find_mirror_vma(vma);
104565+ if (!vma_m)
104566+ return;
104567+
104568+ BUG_ON(!PageLocked(page_m));
104569+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
104570+ address_m = address + SEGMEXEC_TASK_SIZE;
104571+ pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
104572+ pte_m = pte_offset_map(pmd_m, address_m);
104573+ ptl_m = pte_lockptr(mm, pmd_m);
104574+ if (ptl != ptl_m) {
104575+ spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
104576+ if (!pte_none(*pte_m))
104577+ goto out;
104578+ }
104579+
104580+ entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
104581+ page_cache_get(page_m);
104582+ page_add_anon_rmap(page_m, vma_m, address_m);
104583+ inc_mm_counter_fast(mm, MM_ANONPAGES);
104584+ set_pte_at(mm, address_m, pte_m, entry_m);
104585+ update_mmu_cache(vma_m, address_m, pte_m);
104586+out:
104587+ if (ptl != ptl_m)
104588+ spin_unlock(ptl_m);
104589+ pte_unmap(pte_m);
104590+ unlock_page(page_m);
104591+}
104592+
104593+void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
104594+{
104595+ struct mm_struct *mm = vma->vm_mm;
104596+ unsigned long address_m;
104597+ spinlock_t *ptl_m;
104598+ struct vm_area_struct *vma_m;
104599+ pmd_t *pmd_m;
104600+ pte_t *pte_m, entry_m;
104601+
104602+ BUG_ON(!page_m || PageAnon(page_m));
104603+
104604+ vma_m = pax_find_mirror_vma(vma);
104605+ if (!vma_m)
104606+ return;
104607+
104608+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
104609+ address_m = address + SEGMEXEC_TASK_SIZE;
104610+ pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
104611+ pte_m = pte_offset_map(pmd_m, address_m);
104612+ ptl_m = pte_lockptr(mm, pmd_m);
104613+ if (ptl != ptl_m) {
104614+ spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
104615+ if (!pte_none(*pte_m))
104616+ goto out;
104617+ }
104618+
104619+ entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
104620+ page_cache_get(page_m);
104621+ page_add_file_rmap(page_m);
104622+ inc_mm_counter_fast(mm, MM_FILEPAGES);
104623+ set_pte_at(mm, address_m, pte_m, entry_m);
104624+ update_mmu_cache(vma_m, address_m, pte_m);
104625+out:
104626+ if (ptl != ptl_m)
104627+ spin_unlock(ptl_m);
104628+ pte_unmap(pte_m);
104629+}
104630+
104631+static void pax_mirror_pfn_pte(struct vm_area_struct *vma, unsigned long address, unsigned long pfn_m, spinlock_t *ptl)
104632+{
104633+ struct mm_struct *mm = vma->vm_mm;
104634+ unsigned long address_m;
104635+ spinlock_t *ptl_m;
104636+ struct vm_area_struct *vma_m;
104637+ pmd_t *pmd_m;
104638+ pte_t *pte_m, entry_m;
104639+
104640+ vma_m = pax_find_mirror_vma(vma);
104641+ if (!vma_m)
104642+ return;
104643+
104644+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
104645+ address_m = address + SEGMEXEC_TASK_SIZE;
104646+ pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
104647+ pte_m = pte_offset_map(pmd_m, address_m);
104648+ ptl_m = pte_lockptr(mm, pmd_m);
104649+ if (ptl != ptl_m) {
104650+ spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
104651+ if (!pte_none(*pte_m))
104652+ goto out;
104653+ }
104654+
104655+ entry_m = pfn_pte(pfn_m, vma_m->vm_page_prot);
104656+ set_pte_at(mm, address_m, pte_m, entry_m);
104657+out:
104658+ if (ptl != ptl_m)
104659+ spin_unlock(ptl_m);
104660+ pte_unmap(pte_m);
104661+}
104662+
104663+static void pax_mirror_pte(struct vm_area_struct *vma, unsigned long address, pte_t *pte, pmd_t *pmd, spinlock_t *ptl)
104664+{
104665+ struct page *page_m;
104666+ pte_t entry;
104667+
104668+ if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC))
104669+ goto out;
104670+
104671+ entry = *pte;
104672+ page_m = vm_normal_page(vma, address, entry);
104673+ if (!page_m)
104674+ pax_mirror_pfn_pte(vma, address, pte_pfn(entry), ptl);
104675+ else if (PageAnon(page_m)) {
104676+ if (pax_find_mirror_vma(vma)) {
104677+ pte_unmap_unlock(pte, ptl);
104678+ lock_page(page_m);
104679+ pte = pte_offset_map_lock(vma->vm_mm, pmd, address, &ptl);
104680+ if (pte_same(entry, *pte))
104681+ pax_mirror_anon_pte(vma, address, page_m, ptl);
104682+ else
104683+ unlock_page(page_m);
104684+ }
104685+ } else
104686+ pax_mirror_file_pte(vma, address, page_m, ptl);
104687+
104688+out:
104689+ pte_unmap_unlock(pte, ptl);
104690+}
104691+#endif
104692+
104693 /*
104694 * This routine handles present pages, when users try to write
104695 * to a shared page. It is done by copying the page to a new address
104696@@ -2172,6 +2377,12 @@ gotten:
104697 */
104698 page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
104699 if (likely(pte_same(*page_table, orig_pte))) {
104700+
104701+#ifdef CONFIG_PAX_SEGMEXEC
104702+ if (pax_find_mirror_vma(vma))
104703+ BUG_ON(!trylock_page(new_page));
104704+#endif
104705+
104706 if (old_page) {
104707 if (!PageAnon(old_page)) {
104708 dec_mm_counter_fast(mm, MM_FILEPAGES);
104709@@ -2225,6 +2436,10 @@ gotten:
104710 page_remove_rmap(old_page);
104711 }
104712
104713+#ifdef CONFIG_PAX_SEGMEXEC
104714+ pax_mirror_anon_pte(vma, address, new_page, ptl);
104715+#endif
104716+
104717 /* Free the old page.. */
104718 new_page = old_page;
104719 ret |= VM_FAULT_WRITE;
104720@@ -2483,6 +2698,11 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
104721 swap_free(entry);
104722 if (vm_swap_full() || (vma->vm_flags & VM_LOCKED) || PageMlocked(page))
104723 try_to_free_swap(page);
104724+
104725+#ifdef CONFIG_PAX_SEGMEXEC
104726+ if ((flags & FAULT_FLAG_WRITE) || !pax_find_mirror_vma(vma))
104727+#endif
104728+
104729 unlock_page(page);
104730 if (page != swapcache) {
104731 /*
104732@@ -2506,6 +2726,11 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
104733
104734 /* No need to invalidate - it was non-present before */
104735 update_mmu_cache(vma, address, page_table);
104736+
104737+#ifdef CONFIG_PAX_SEGMEXEC
104738+ pax_mirror_anon_pte(vma, address, page, ptl);
104739+#endif
104740+
104741 unlock:
104742 pte_unmap_unlock(page_table, ptl);
104743 out:
104744@@ -2525,40 +2750,6 @@ out_release:
104745 }
104746
104747 /*
104748- * This is like a special single-page "expand_{down|up}wards()",
104749- * except we must first make sure that 'address{-|+}PAGE_SIZE'
104750- * doesn't hit another vma.
104751- */
104752-static inline int check_stack_guard_page(struct vm_area_struct *vma, unsigned long address)
104753-{
104754- address &= PAGE_MASK;
104755- if ((vma->vm_flags & VM_GROWSDOWN) && address == vma->vm_start) {
104756- struct vm_area_struct *prev = vma->vm_prev;
104757-
104758- /*
104759- * Is there a mapping abutting this one below?
104760- *
104761- * That's only ok if it's the same stack mapping
104762- * that has gotten split..
104763- */
104764- if (prev && prev->vm_end == address)
104765- return prev->vm_flags & VM_GROWSDOWN ? 0 : -ENOMEM;
104766-
104767- return expand_downwards(vma, address - PAGE_SIZE);
104768- }
104769- if ((vma->vm_flags & VM_GROWSUP) && address + PAGE_SIZE == vma->vm_end) {
104770- struct vm_area_struct *next = vma->vm_next;
104771-
104772- /* As VM_GROWSDOWN but s/below/above/ */
104773- if (next && next->vm_start == address + PAGE_SIZE)
104774- return next->vm_flags & VM_GROWSUP ? 0 : -ENOMEM;
104775-
104776- return expand_upwards(vma, address + PAGE_SIZE);
104777- }
104778- return 0;
104779-}
104780-
104781-/*
104782 * We enter with non-exclusive mmap_sem (to exclude vma changes,
104783 * but allow concurrent faults), and pte mapped but not yet locked.
104784 * We return with mmap_sem still held, but pte unmapped and unlocked.
104785@@ -2568,27 +2759,23 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
104786 unsigned int flags)
104787 {
104788 struct mem_cgroup *memcg;
104789- struct page *page;
104790+ struct page *page = NULL;
104791 spinlock_t *ptl;
104792 pte_t entry;
104793
104794- pte_unmap(page_table);
104795-
104796- /* Check if we need to add a guard page to the stack */
104797- if (check_stack_guard_page(vma, address) < 0)
104798- return VM_FAULT_SIGSEGV;
104799-
104800- /* Use the zero-page for reads */
104801 if (!(flags & FAULT_FLAG_WRITE) && !mm_forbids_zeropage(mm)) {
104802 entry = pte_mkspecial(pfn_pte(my_zero_pfn(address),
104803 vma->vm_page_prot));
104804- page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
104805+ ptl = pte_lockptr(mm, pmd);
104806+ spin_lock(ptl);
104807 if (!pte_none(*page_table))
104808 goto unlock;
104809 goto setpte;
104810 }
104811
104812 /* Allocate our own private page. */
104813+ pte_unmap(page_table);
104814+
104815 if (unlikely(anon_vma_prepare(vma)))
104816 goto oom;
104817 page = alloc_zeroed_user_highpage_movable(vma, address);
104818@@ -2612,6 +2799,11 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
104819 if (!pte_none(*page_table))
104820 goto release;
104821
104822+#ifdef CONFIG_PAX_SEGMEXEC
104823+ if (pax_find_mirror_vma(vma))
104824+ BUG_ON(!trylock_page(page));
104825+#endif
104826+
104827 inc_mm_counter_fast(mm, MM_ANONPAGES);
104828 page_add_new_anon_rmap(page, vma, address);
104829 mem_cgroup_commit_charge(page, memcg, false);
104830@@ -2621,6 +2813,12 @@ setpte:
104831
104832 /* No need to invalidate - it was non-present before */
104833 update_mmu_cache(vma, address, page_table);
104834+
104835+#ifdef CONFIG_PAX_SEGMEXEC
104836+ if (page)
104837+ pax_mirror_anon_pte(vma, address, page, ptl);
104838+#endif
104839+
104840 unlock:
104841 pte_unmap_unlock(page_table, ptl);
104842 return 0;
104843@@ -2853,6 +3051,11 @@ static int do_read_fault(struct mm_struct *mm, struct vm_area_struct *vma,
104844 return ret;
104845 }
104846 do_set_pte(vma, address, fault_page, pte, false, false);
104847+
104848+#ifdef CONFIG_PAX_SEGMEXEC
104849+ pax_mirror_file_pte(vma, address, fault_page, ptl);
104850+#endif
104851+
104852 unlock_page(fault_page);
104853 unlock_out:
104854 pte_unmap_unlock(pte, ptl);
104855@@ -2904,7 +3107,18 @@ static int do_cow_fault(struct mm_struct *mm, struct vm_area_struct *vma,
104856 }
104857 goto uncharge_out;
104858 }
104859+
104860+#ifdef CONFIG_PAX_SEGMEXEC
104861+ if (pax_find_mirror_vma(vma))
104862+ BUG_ON(!trylock_page(new_page));
104863+#endif
104864+
104865 do_set_pte(vma, address, new_page, pte, true, true);
104866+
104867+#ifdef CONFIG_PAX_SEGMEXEC
104868+ pax_mirror_anon_pte(vma, address, new_page, ptl);
104869+#endif
104870+
104871 mem_cgroup_commit_charge(new_page, memcg, false);
104872 lru_cache_add_active_or_unevictable(new_page, vma);
104873 pte_unmap_unlock(pte, ptl);
104874@@ -2962,6 +3176,11 @@ static int do_shared_fault(struct mm_struct *mm, struct vm_area_struct *vma,
104875 return ret;
104876 }
104877 do_set_pte(vma, address, fault_page, pte, true, false);
104878+
104879+#ifdef CONFIG_PAX_SEGMEXEC
104880+ pax_mirror_file_pte(vma, address, fault_page, ptl);
104881+#endif
104882+
104883 pte_unmap_unlock(pte, ptl);
104884
104885 if (set_page_dirty(fault_page))
104886@@ -3185,6 +3404,12 @@ static int handle_pte_fault(struct mm_struct *mm,
104887 if (flags & FAULT_FLAG_WRITE)
104888 flush_tlb_fix_spurious_fault(vma, address);
104889 }
104890+
104891+#ifdef CONFIG_PAX_SEGMEXEC
104892+ pax_mirror_pte(vma, address, pte, pmd, ptl);
104893+ return 0;
104894+#endif
104895+
104896 unlock:
104897 pte_unmap_unlock(pte, ptl);
104898 return 0;
104899@@ -3204,9 +3429,41 @@ static int __handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
104900 pmd_t *pmd;
104901 pte_t *pte;
104902
104903+#ifdef CONFIG_PAX_SEGMEXEC
104904+ struct vm_area_struct *vma_m;
104905+#endif
104906+
104907 if (unlikely(is_vm_hugetlb_page(vma)))
104908 return hugetlb_fault(mm, vma, address, flags);
104909
104910+#ifdef CONFIG_PAX_SEGMEXEC
104911+ vma_m = pax_find_mirror_vma(vma);
104912+ if (vma_m) {
104913+ unsigned long address_m;
104914+ pgd_t *pgd_m;
104915+ pud_t *pud_m;
104916+ pmd_t *pmd_m;
104917+
104918+ if (vma->vm_start > vma_m->vm_start) {
104919+ address_m = address;
104920+ address -= SEGMEXEC_TASK_SIZE;
104921+ vma = vma_m;
104922+ } else
104923+ address_m = address + SEGMEXEC_TASK_SIZE;
104924+
104925+ pgd_m = pgd_offset(mm, address_m);
104926+ pud_m = pud_alloc(mm, pgd_m, address_m);
104927+ if (!pud_m)
104928+ return VM_FAULT_OOM;
104929+ pmd_m = pmd_alloc(mm, pud_m, address_m);
104930+ if (!pmd_m)
104931+ return VM_FAULT_OOM;
104932+ if (!pmd_present(*pmd_m) && __pte_alloc(mm, vma_m, pmd_m, address_m))
104933+ return VM_FAULT_OOM;
104934+ pax_unmap_mirror_pte(vma_m, address_m, pmd_m);
104935+ }
104936+#endif
104937+
104938 pgd = pgd_offset(mm, address);
104939 pud = pud_alloc(mm, pgd, address);
104940 if (!pud)
104941@@ -3341,6 +3598,23 @@ int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
104942 spin_unlock(&mm->page_table_lock);
104943 return 0;
104944 }
104945+
104946+int __pud_alloc_kernel(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
104947+{
104948+ pud_t *new = pud_alloc_one(mm, address);
104949+ if (!new)
104950+ return -ENOMEM;
104951+
104952+ smp_wmb(); /* See comment in __pte_alloc */
104953+
104954+ spin_lock(&mm->page_table_lock);
104955+ if (pgd_present(*pgd)) /* Another has populated it */
104956+ pud_free(mm, new);
104957+ else
104958+ pgd_populate_kernel(mm, pgd, new);
104959+ spin_unlock(&mm->page_table_lock);
104960+ return 0;
104961+}
104962 #endif /* __PAGETABLE_PUD_FOLDED */
104963
104964 #ifndef __PAGETABLE_PMD_FOLDED
104965@@ -3373,6 +3647,32 @@ int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
104966 spin_unlock(&mm->page_table_lock);
104967 return 0;
104968 }
104969+
104970+int __pmd_alloc_kernel(struct mm_struct *mm, pud_t *pud, unsigned long address)
104971+{
104972+ pmd_t *new = pmd_alloc_one(mm, address);
104973+ if (!new)
104974+ return -ENOMEM;
104975+
104976+ smp_wmb(); /* See comment in __pte_alloc */
104977+
104978+ spin_lock(&mm->page_table_lock);
104979+#ifndef __ARCH_HAS_4LEVEL_HACK
104980+ if (!pud_present(*pud)) {
104981+ mm_inc_nr_pmds(mm);
104982+ pud_populate_kernel(mm, pud, new);
104983+ } else /* Another has populated it */
104984+ pmd_free(mm, new);
104985+#else
104986+ if (!pgd_present(*pud)) {
104987+ mm_inc_nr_pmds(mm);
104988+ pgd_populate_kernel(mm, pud, new);
104989+ } else /* Another has populated it */
104990+ pmd_free(mm, new);
104991+#endif /* __ARCH_HAS_4LEVEL_HACK */
104992+ spin_unlock(&mm->page_table_lock);
104993+ return 0;
104994+}
104995 #endif /* __PAGETABLE_PMD_FOLDED */
104996
104997 static int __follow_pte(struct mm_struct *mm, unsigned long address,
104998@@ -3482,8 +3782,8 @@ out:
104999 return ret;
105000 }
105001
105002-int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
105003- void *buf, int len, int write)
105004+ssize_t generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
105005+ void *buf, size_t len, int write)
105006 {
105007 resource_size_t phys_addr;
105008 unsigned long prot = 0;
105009@@ -3509,8 +3809,8 @@ EXPORT_SYMBOL_GPL(generic_access_phys);
105010 * Access another process' address space as given in mm. If non-NULL, use the
105011 * given task for page fault accounting.
105012 */
105013-static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
105014- unsigned long addr, void *buf, int len, int write)
105015+static ssize_t __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
105016+ unsigned long addr, void *buf, size_t len, int write)
105017 {
105018 struct vm_area_struct *vma;
105019 void *old_buf = buf;
105020@@ -3518,7 +3818,7 @@ static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
105021 down_read(&mm->mmap_sem);
105022 /* ignore errors, just check how much was successfully transferred */
105023 while (len) {
105024- int bytes, ret, offset;
105025+ ssize_t bytes, ret, offset;
105026 void *maddr;
105027 struct page *page = NULL;
105028
105029@@ -3579,8 +3879,8 @@ static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
105030 *
105031 * The caller must hold a reference on @mm.
105032 */
105033-int access_remote_vm(struct mm_struct *mm, unsigned long addr,
105034- void *buf, int len, int write)
105035+ssize_t access_remote_vm(struct mm_struct *mm, unsigned long addr,
105036+ void *buf, size_t len, int write)
105037 {
105038 return __access_remote_vm(NULL, mm, addr, buf, len, write);
105039 }
105040@@ -3590,11 +3890,11 @@ int access_remote_vm(struct mm_struct *mm, unsigned long addr,
105041 * Source/target buffer must be kernel space,
105042 * Do not walk the page table directly, use get_user_pages
105043 */
105044-int access_process_vm(struct task_struct *tsk, unsigned long addr,
105045- void *buf, int len, int write)
105046+ssize_t access_process_vm(struct task_struct *tsk, unsigned long addr,
105047+ void *buf, size_t len, int write)
105048 {
105049 struct mm_struct *mm;
105050- int ret;
105051+ ssize_t ret;
105052
105053 mm = get_task_mm(tsk);
105054 if (!mm)
105055diff --git a/mm/mempolicy.c b/mm/mempolicy.c
105056index de5dc5e..68a4ea3 100644
105057--- a/mm/mempolicy.c
105058+++ b/mm/mempolicy.c
105059@@ -703,6 +703,10 @@ static int mbind_range(struct mm_struct *mm, unsigned long start,
105060 unsigned long vmstart;
105061 unsigned long vmend;
105062
105063+#ifdef CONFIG_PAX_SEGMEXEC
105064+ struct vm_area_struct *vma_m;
105065+#endif
105066+
105067 vma = find_vma(mm, start);
105068 if (!vma || vma->vm_start > start)
105069 return -EFAULT;
105070@@ -746,6 +750,16 @@ static int mbind_range(struct mm_struct *mm, unsigned long start,
105071 err = vma_replace_policy(vma, new_pol);
105072 if (err)
105073 goto out;
105074+
105075+#ifdef CONFIG_PAX_SEGMEXEC
105076+ vma_m = pax_find_mirror_vma(vma);
105077+ if (vma_m) {
105078+ err = vma_replace_policy(vma_m, new_pol);
105079+ if (err)
105080+ goto out;
105081+ }
105082+#endif
105083+
105084 }
105085
105086 out:
105087@@ -1160,6 +1174,17 @@ static long do_mbind(unsigned long start, unsigned long len,
105088
105089 if (end < start)
105090 return -EINVAL;
105091+
105092+#ifdef CONFIG_PAX_SEGMEXEC
105093+ if (mm->pax_flags & MF_PAX_SEGMEXEC) {
105094+ if (end > SEGMEXEC_TASK_SIZE)
105095+ return -EINVAL;
105096+ } else
105097+#endif
105098+
105099+ if (end > TASK_SIZE)
105100+ return -EINVAL;
105101+
105102 if (end == start)
105103 return 0;
105104
105105@@ -1385,8 +1410,7 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
105106 */
105107 tcred = __task_cred(task);
105108 if (!uid_eq(cred->euid, tcred->suid) && !uid_eq(cred->euid, tcred->uid) &&
105109- !uid_eq(cred->uid, tcred->suid) && !uid_eq(cred->uid, tcred->uid) &&
105110- !capable(CAP_SYS_NICE)) {
105111+ !uid_eq(cred->uid, tcred->suid) && !capable(CAP_SYS_NICE)) {
105112 rcu_read_unlock();
105113 err = -EPERM;
105114 goto out_put;
105115@@ -1417,6 +1441,15 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
105116 goto out;
105117 }
105118
105119+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
105120+ if (mm != current->mm &&
105121+ (mm->pax_flags & MF_PAX_RANDMMAP || mm->pax_flags & MF_PAX_SEGMEXEC)) {
105122+ mmput(mm);
105123+ err = -EPERM;
105124+ goto out;
105125+ }
105126+#endif
105127+
105128 err = do_migrate_pages(mm, old, new,
105129 capable(CAP_SYS_NICE) ? MPOL_MF_MOVE_ALL : MPOL_MF_MOVE);
105130
105131diff --git a/mm/migrate.c b/mm/migrate.c
105132index 85e0426..be49beb 100644
105133--- a/mm/migrate.c
105134+++ b/mm/migrate.c
105135@@ -1472,8 +1472,7 @@ SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages,
105136 */
105137 tcred = __task_cred(task);
105138 if (!uid_eq(cred->euid, tcred->suid) && !uid_eq(cred->euid, tcred->uid) &&
105139- !uid_eq(cred->uid, tcred->suid) && !uid_eq(cred->uid, tcred->uid) &&
105140- !capable(CAP_SYS_NICE)) {
105141+ !uid_eq(cred->uid, tcred->suid) && !capable(CAP_SYS_NICE)) {
105142 rcu_read_unlock();
105143 err = -EPERM;
105144 goto out;
105145diff --git a/mm/mlock.c b/mm/mlock.c
105146index 8a54cd2..92f1747 100644
105147--- a/mm/mlock.c
105148+++ b/mm/mlock.c
105149@@ -14,6 +14,7 @@
105150 #include <linux/pagevec.h>
105151 #include <linux/mempolicy.h>
105152 #include <linux/syscalls.h>
105153+#include <linux/security.h>
105154 #include <linux/sched.h>
105155 #include <linux/export.h>
105156 #include <linux/rmap.h>
105157@@ -613,7 +614,7 @@ static int do_mlock(unsigned long start, size_t len, int on)
105158 {
105159 unsigned long nstart, end, tmp;
105160 struct vm_area_struct * vma, * prev;
105161- int error;
105162+ int error = 0;
105163
105164 VM_BUG_ON(start & ~PAGE_MASK);
105165 VM_BUG_ON(len != PAGE_ALIGN(len));
105166@@ -622,6 +623,9 @@ static int do_mlock(unsigned long start, size_t len, int on)
105167 return -EINVAL;
105168 if (end == start)
105169 return 0;
105170+ if (end > TASK_SIZE)
105171+ return -EINVAL;
105172+
105173 vma = find_vma(current->mm, start);
105174 if (!vma || vma->vm_start > start)
105175 return -ENOMEM;
105176@@ -633,6 +637,11 @@ static int do_mlock(unsigned long start, size_t len, int on)
105177 for (nstart = start ; ; ) {
105178 vm_flags_t newflags;
105179
105180+#ifdef CONFIG_PAX_SEGMEXEC
105181+ if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
105182+ break;
105183+#endif
105184+
105185 /* Here we know that vma->vm_start <= nstart < vma->vm_end. */
105186
105187 newflags = vma->vm_flags & ~VM_LOCKED;
105188@@ -746,6 +755,7 @@ SYSCALL_DEFINE2(mlock, unsigned long, start, size_t, len)
105189 locked += current->mm->locked_vm;
105190
105191 /* check against resource limits */
105192+ gr_learn_resource(current, RLIMIT_MEMLOCK, (current->mm->locked_vm << PAGE_SHIFT) + len, 1);
105193 if ((locked <= lock_limit) || capable(CAP_IPC_LOCK))
105194 error = do_mlock(start, len, 1);
105195
105196@@ -783,6 +793,11 @@ static int do_mlockall(int flags)
105197 for (vma = current->mm->mmap; vma ; vma = prev->vm_next) {
105198 vm_flags_t newflags;
105199
105200+#ifdef CONFIG_PAX_SEGMEXEC
105201+ if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
105202+ break;
105203+#endif
105204+
105205 newflags = vma->vm_flags & ~VM_LOCKED;
105206 if (flags & MCL_CURRENT)
105207 newflags |= VM_LOCKED;
105208@@ -814,8 +829,10 @@ SYSCALL_DEFINE1(mlockall, int, flags)
105209 lock_limit >>= PAGE_SHIFT;
105210
105211 ret = -ENOMEM;
105212+
105213+ gr_learn_resource(current, RLIMIT_MEMLOCK, current->mm->total_vm << PAGE_SHIFT, 1);
105214+
105215 down_write(&current->mm->mmap_sem);
105216-
105217 if (!(flags & MCL_CURRENT) || (current->mm->total_vm <= lock_limit) ||
105218 capable(CAP_IPC_LOCK))
105219 ret = do_mlockall(flags);
105220diff --git a/mm/mm_init.c b/mm/mm_init.c
105221index 5f420f7..dd42fb1b 100644
105222--- a/mm/mm_init.c
105223+++ b/mm/mm_init.c
105224@@ -177,7 +177,7 @@ static int __meminit mm_compute_batch_notifier(struct notifier_block *self,
105225 return NOTIFY_OK;
105226 }
105227
105228-static struct notifier_block compute_batch_nb __meminitdata = {
105229+static struct notifier_block compute_batch_nb __meminitconst = {
105230 .notifier_call = mm_compute_batch_notifier,
105231 .priority = IPC_CALLBACK_PRI, /* use lowest priority */
105232 };
105233diff --git a/mm/mmap.c b/mm/mmap.c
105234index 9ec50a3..0476e2d 100644
105235--- a/mm/mmap.c
105236+++ b/mm/mmap.c
105237@@ -41,6 +41,7 @@
105238 #include <linux/notifier.h>
105239 #include <linux/memory.h>
105240 #include <linux/printk.h>
105241+#include <linux/random.h>
105242
105243 #include <asm/uaccess.h>
105244 #include <asm/cacheflush.h>
105245@@ -57,6 +58,16 @@
105246 #define arch_rebalance_pgtables(addr, len) (addr)
105247 #endif
105248
105249+static inline void verify_mm_writelocked(struct mm_struct *mm)
105250+{
105251+#if defined(CONFIG_DEBUG_VM) || defined(CONFIG_PAX)
105252+ if (unlikely(down_read_trylock(&mm->mmap_sem))) {
105253+ up_read(&mm->mmap_sem);
105254+ BUG();
105255+ }
105256+#endif
105257+}
105258+
105259 static void unmap_region(struct mm_struct *mm,
105260 struct vm_area_struct *vma, struct vm_area_struct *prev,
105261 unsigned long start, unsigned long end);
105262@@ -76,16 +87,25 @@ static void unmap_region(struct mm_struct *mm,
105263 * x: (no) no x: (no) yes x: (no) yes x: (yes) yes
105264 *
105265 */
105266-pgprot_t protection_map[16] = {
105267+pgprot_t protection_map[16] __read_only = {
105268 __P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111,
105269 __S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111
105270 };
105271
105272-pgprot_t vm_get_page_prot(unsigned long vm_flags)
105273+pgprot_t vm_get_page_prot(vm_flags_t vm_flags)
105274 {
105275- return __pgprot(pgprot_val(protection_map[vm_flags &
105276+ pgprot_t prot = __pgprot(pgprot_val(protection_map[vm_flags &
105277 (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)]) |
105278 pgprot_val(arch_vm_get_page_prot(vm_flags)));
105279+
105280+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
105281+ if (!(__supported_pte_mask & _PAGE_NX) &&
105282+ (vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC &&
105283+ (vm_flags & (VM_READ | VM_WRITE)))
105284+ prot = __pgprot(pte_val(pte_exprotect(__pte(pgprot_val(prot)))));
105285+#endif
105286+
105287+ return prot;
105288 }
105289 EXPORT_SYMBOL(vm_get_page_prot);
105290
105291@@ -114,6 +134,7 @@ unsigned long sysctl_overcommit_kbytes __read_mostly;
105292 int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT;
105293 unsigned long sysctl_user_reserve_kbytes __read_mostly = 1UL << 17; /* 128MB */
105294 unsigned long sysctl_admin_reserve_kbytes __read_mostly = 1UL << 13; /* 8MB */
105295+unsigned long sysctl_heap_stack_gap __read_mostly = 64*1024;
105296 /*
105297 * Make sure vm_committed_as in one cacheline and not cacheline shared with
105298 * other variables. It can be updated by several CPUs frequently.
105299@@ -271,6 +292,7 @@ static struct vm_area_struct *remove_vma(struct vm_area_struct *vma)
105300 struct vm_area_struct *next = vma->vm_next;
105301
105302 might_sleep();
105303+ BUG_ON(vma->vm_mirror);
105304 if (vma->vm_ops && vma->vm_ops->close)
105305 vma->vm_ops->close(vma);
105306 if (vma->vm_file)
105307@@ -284,6 +306,7 @@ static unsigned long do_brk(unsigned long addr, unsigned long len);
105308
105309 SYSCALL_DEFINE1(brk, unsigned long, brk)
105310 {
105311+ unsigned long rlim;
105312 unsigned long retval;
105313 unsigned long newbrk, oldbrk;
105314 struct mm_struct *mm = current->mm;
105315@@ -314,7 +337,13 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
105316 * segment grow beyond its set limit the in case where the limit is
105317 * not page aligned -Ram Gupta
105318 */
105319- if (check_data_rlimit(rlimit(RLIMIT_DATA), brk, mm->start_brk,
105320+ rlim = rlimit(RLIMIT_DATA);
105321+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
105322+ /* force a minimum 16MB brk heap on setuid/setgid binaries */
105323+ if (rlim < PAGE_SIZE && (get_dumpable(mm) != SUID_DUMP_USER) && gr_is_global_nonroot(current_uid()))
105324+ rlim = 4096 * PAGE_SIZE;
105325+#endif
105326+ if (check_data_rlimit(rlim, brk, mm->start_brk,
105327 mm->end_data, mm->start_data))
105328 goto out;
105329
105330@@ -967,6 +996,12 @@ static int
105331 can_vma_merge_before(struct vm_area_struct *vma, unsigned long vm_flags,
105332 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
105333 {
105334+
105335+#ifdef CONFIG_PAX_SEGMEXEC
105336+ if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_start == SEGMEXEC_TASK_SIZE)
105337+ return 0;
105338+#endif
105339+
105340 if (is_mergeable_vma(vma, file, vm_flags) &&
105341 is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
105342 if (vma->vm_pgoff == vm_pgoff)
105343@@ -986,6 +1021,12 @@ static int
105344 can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
105345 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
105346 {
105347+
105348+#ifdef CONFIG_PAX_SEGMEXEC
105349+ if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end == SEGMEXEC_TASK_SIZE)
105350+ return 0;
105351+#endif
105352+
105353 if (is_mergeable_vma(vma, file, vm_flags) &&
105354 is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
105355 pgoff_t vm_pglen;
105356@@ -1035,6 +1076,13 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
105357 struct vm_area_struct *area, *next;
105358 int err;
105359
105360+#ifdef CONFIG_PAX_SEGMEXEC
105361+ unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE, end_m = end + SEGMEXEC_TASK_SIZE;
105362+ struct vm_area_struct *area_m = NULL, *next_m = NULL, *prev_m = NULL;
105363+
105364+ BUG_ON((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE < end);
105365+#endif
105366+
105367 /*
105368 * We later require that vma->vm_flags == vm_flags,
105369 * so this tests vma->vm_flags & VM_SPECIAL, too.
105370@@ -1050,6 +1098,15 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
105371 if (next && next->vm_end == end) /* cases 6, 7, 8 */
105372 next = next->vm_next;
105373
105374+#ifdef CONFIG_PAX_SEGMEXEC
105375+ if (prev)
105376+ prev_m = pax_find_mirror_vma(prev);
105377+ if (area)
105378+ area_m = pax_find_mirror_vma(area);
105379+ if (next)
105380+ next_m = pax_find_mirror_vma(next);
105381+#endif
105382+
105383 /*
105384 * Can it merge with the predecessor?
105385 */
105386@@ -1069,9 +1126,24 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
105387 /* cases 1, 6 */
105388 err = vma_adjust(prev, prev->vm_start,
105389 next->vm_end, prev->vm_pgoff, NULL);
105390- } else /* cases 2, 5, 7 */
105391+
105392+#ifdef CONFIG_PAX_SEGMEXEC
105393+ if (!err && prev_m)
105394+ err = vma_adjust(prev_m, prev_m->vm_start,
105395+ next_m->vm_end, prev_m->vm_pgoff, NULL);
105396+#endif
105397+
105398+ } else { /* cases 2, 5, 7 */
105399 err = vma_adjust(prev, prev->vm_start,
105400 end, prev->vm_pgoff, NULL);
105401+
105402+#ifdef CONFIG_PAX_SEGMEXEC
105403+ if (!err && prev_m)
105404+ err = vma_adjust(prev_m, prev_m->vm_start,
105405+ end_m, prev_m->vm_pgoff, NULL);
105406+#endif
105407+
105408+ }
105409 if (err)
105410 return NULL;
105411 khugepaged_enter_vma_merge(prev, vm_flags);
105412@@ -1085,12 +1157,27 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
105413 mpol_equal(policy, vma_policy(next)) &&
105414 can_vma_merge_before(next, vm_flags,
105415 anon_vma, file, pgoff+pglen)) {
105416- if (prev && addr < prev->vm_end) /* case 4 */
105417+ if (prev && addr < prev->vm_end) { /* case 4 */
105418 err = vma_adjust(prev, prev->vm_start,
105419 addr, prev->vm_pgoff, NULL);
105420- else /* cases 3, 8 */
105421+
105422+#ifdef CONFIG_PAX_SEGMEXEC
105423+ if (!err && prev_m)
105424+ err = vma_adjust(prev_m, prev_m->vm_start,
105425+ addr_m, prev_m->vm_pgoff, NULL);
105426+#endif
105427+
105428+ } else { /* cases 3, 8 */
105429 err = vma_adjust(area, addr, next->vm_end,
105430 next->vm_pgoff - pglen, NULL);
105431+
105432+#ifdef CONFIG_PAX_SEGMEXEC
105433+ if (!err && area_m)
105434+ err = vma_adjust(area_m, addr_m, next_m->vm_end,
105435+ next_m->vm_pgoff - pglen, NULL);
105436+#endif
105437+
105438+ }
105439 if (err)
105440 return NULL;
105441 khugepaged_enter_vma_merge(area, vm_flags);
105442@@ -1199,8 +1286,10 @@ none:
105443 void vm_stat_account(struct mm_struct *mm, unsigned long flags,
105444 struct file *file, long pages)
105445 {
105446- const unsigned long stack_flags
105447- = VM_STACK_FLAGS & (VM_GROWSUP|VM_GROWSDOWN);
105448+
105449+#ifdef CONFIG_PAX_RANDMMAP
105450+ if (!(mm->pax_flags & MF_PAX_RANDMMAP) || (flags & (VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)))
105451+#endif
105452
105453 mm->total_vm += pages;
105454
105455@@ -1208,7 +1297,7 @@ void vm_stat_account(struct mm_struct *mm, unsigned long flags,
105456 mm->shared_vm += pages;
105457 if ((flags & (VM_EXEC|VM_WRITE)) == VM_EXEC)
105458 mm->exec_vm += pages;
105459- } else if (flags & stack_flags)
105460+ } else if (flags & (VM_GROWSUP|VM_GROWSDOWN))
105461 mm->stack_vm += pages;
105462 }
105463 #endif /* CONFIG_PROC_FS */
105464@@ -1238,6 +1327,7 @@ static inline int mlock_future_check(struct mm_struct *mm,
105465 locked += mm->locked_vm;
105466 lock_limit = rlimit(RLIMIT_MEMLOCK);
105467 lock_limit >>= PAGE_SHIFT;
105468+ gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
105469 if (locked > lock_limit && !capable(CAP_IPC_LOCK))
105470 return -EAGAIN;
105471 }
105472@@ -1264,7 +1354,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
105473 * (the exception is when the underlying filesystem is noexec
105474 * mounted, in which case we dont add PROT_EXEC.)
105475 */
105476- if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
105477+ if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
105478 if (!(file && (file->f_path.mnt->mnt_flags & MNT_NOEXEC)))
105479 prot |= PROT_EXEC;
105480
105481@@ -1290,7 +1380,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
105482 /* Obtain the address to map to. we verify (or select) it and ensure
105483 * that it represents a valid section of the address space.
105484 */
105485- addr = get_unmapped_area(file, addr, len, pgoff, flags);
105486+ addr = get_unmapped_area(file, addr, len, pgoff, flags | ((prot & PROT_EXEC) ? MAP_EXECUTABLE : 0));
105487 if (addr & ~PAGE_MASK)
105488 return addr;
105489
105490@@ -1301,6 +1391,43 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
105491 vm_flags = calc_vm_prot_bits(prot) | calc_vm_flag_bits(flags) |
105492 mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
105493
105494+#ifdef CONFIG_PAX_MPROTECT
105495+ if (mm->pax_flags & MF_PAX_MPROTECT) {
105496+
105497+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
105498+ if (file && !pgoff && (vm_flags & VM_EXEC) && mm->binfmt &&
105499+ mm->binfmt->handle_mmap)
105500+ mm->binfmt->handle_mmap(file);
105501+#endif
105502+
105503+#ifndef CONFIG_PAX_MPROTECT_COMPAT
105504+ if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC)) {
105505+ gr_log_rwxmmap(file);
105506+
105507+#ifdef CONFIG_PAX_EMUPLT
105508+ vm_flags &= ~VM_EXEC;
105509+#else
105510+ return -EPERM;
105511+#endif
105512+
105513+ }
105514+
105515+ if (!(vm_flags & VM_EXEC))
105516+ vm_flags &= ~VM_MAYEXEC;
105517+#else
105518+ if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
105519+ vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
105520+#endif
105521+ else
105522+ vm_flags &= ~VM_MAYWRITE;
105523+ }
105524+#endif
105525+
105526+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
105527+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && file)
105528+ vm_flags &= ~VM_PAGEEXEC;
105529+#endif
105530+
105531 if (flags & MAP_LOCKED)
105532 if (!can_do_mlock())
105533 return -EPERM;
105534@@ -1388,6 +1515,9 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
105535 vm_flags |= VM_NORESERVE;
105536 }
105537
105538+ if (!gr_acl_handle_mmap(file, prot))
105539+ return -EACCES;
105540+
105541 addr = mmap_region(file, addr, len, vm_flags, pgoff);
105542 if (!IS_ERR_VALUE(addr) &&
105543 ((vm_flags & VM_LOCKED) ||
105544@@ -1481,7 +1611,7 @@ int vma_wants_writenotify(struct vm_area_struct *vma)
105545 vm_flags_t vm_flags = vma->vm_flags;
105546
105547 /* If it was private or non-writable, the write bit is already clear */
105548- if ((vm_flags & (VM_WRITE|VM_SHARED)) != ((VM_WRITE|VM_SHARED)))
105549+ if ((vm_flags & (VM_WRITE|VM_SHARED)) != (VM_WRITE|VM_SHARED))
105550 return 0;
105551
105552 /* The backer wishes to know when pages are first written to? */
105553@@ -1532,7 +1662,22 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
105554 struct rb_node **rb_link, *rb_parent;
105555 unsigned long charged = 0;
105556
105557+#ifdef CONFIG_PAX_SEGMEXEC
105558+ struct vm_area_struct *vma_m = NULL;
105559+#endif
105560+
105561+ /*
105562+ * mm->mmap_sem is required to protect against another thread
105563+ * changing the mappings in case we sleep.
105564+ */
105565+ verify_mm_writelocked(mm);
105566+
105567 /* Check against address space limit. */
105568+
105569+#ifdef CONFIG_PAX_RANDMMAP
105570+ if (!(mm->pax_flags & MF_PAX_RANDMMAP) || (vm_flags & (VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)))
105571+#endif
105572+
105573 if (!may_expand_vm(mm, len >> PAGE_SHIFT)) {
105574 unsigned long nr_pages;
105575
105576@@ -1551,11 +1696,10 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
105577
105578 /* Clear old maps */
105579 error = -ENOMEM;
105580-munmap_back:
105581 if (find_vma_links(mm, addr, addr + len, &prev, &rb_link, &rb_parent)) {
105582 if (do_munmap(mm, addr, len))
105583 return -ENOMEM;
105584- goto munmap_back;
105585+ BUG_ON(find_vma_links(mm, addr, addr + len, &prev, &rb_link, &rb_parent));
105586 }
105587
105588 /*
105589@@ -1586,6 +1730,16 @@ munmap_back:
105590 goto unacct_error;
105591 }
105592
105593+#ifdef CONFIG_PAX_SEGMEXEC
105594+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vm_flags & VM_EXEC)) {
105595+ vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
105596+ if (!vma_m) {
105597+ error = -ENOMEM;
105598+ goto free_vma;
105599+ }
105600+ }
105601+#endif
105602+
105603 vma->vm_mm = mm;
105604 vma->vm_start = addr;
105605 vma->vm_end = addr + len;
105606@@ -1616,6 +1770,13 @@ munmap_back:
105607 if (error)
105608 goto unmap_and_free_vma;
105609
105610+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
105611+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && !(vma->vm_flags & VM_SPECIAL)) {
105612+ vma->vm_flags |= VM_PAGEEXEC;
105613+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
105614+ }
105615+#endif
105616+
105617 /* Can addr have changed??
105618 *
105619 * Answer: Yes, several device drivers can do it in their
105620@@ -1634,6 +1795,12 @@ munmap_back:
105621 }
105622
105623 vma_link(mm, vma, prev, rb_link, rb_parent);
105624+
105625+#ifdef CONFIG_PAX_SEGMEXEC
105626+ if (vma_m)
105627+ BUG_ON(pax_mirror_vma(vma_m, vma));
105628+#endif
105629+
105630 /* Once vma denies write, undo our temporary denial count */
105631 if (file) {
105632 if (vm_flags & VM_SHARED)
105633@@ -1646,6 +1813,7 @@ out:
105634 perf_event_mmap(vma);
105635
105636 vm_stat_account(mm, vm_flags, file, len >> PAGE_SHIFT);
105637+ track_exec_limit(mm, addr, addr + len, vm_flags);
105638 if (vm_flags & VM_LOCKED) {
105639 if (!((vm_flags & VM_SPECIAL) || is_vm_hugetlb_page(vma) ||
105640 vma == get_gate_vma(current->mm)))
105641@@ -1683,6 +1851,12 @@ allow_write_and_free_vma:
105642 if (vm_flags & VM_DENYWRITE)
105643 allow_write_access(file);
105644 free_vma:
105645+
105646+#ifdef CONFIG_PAX_SEGMEXEC
105647+ if (vma_m)
105648+ kmem_cache_free(vm_area_cachep, vma_m);
105649+#endif
105650+
105651 kmem_cache_free(vm_area_cachep, vma);
105652 unacct_error:
105653 if (charged)
105654@@ -1690,7 +1864,63 @@ unacct_error:
105655 return error;
105656 }
105657
105658-unsigned long unmapped_area(struct vm_unmapped_area_info *info)
105659+#ifdef CONFIG_GRKERNSEC_RAND_THREADSTACK
105660+unsigned long gr_rand_threadstack_offset(const struct mm_struct *mm, const struct file *filp, unsigned long flags)
105661+{
105662+ if ((mm->pax_flags & MF_PAX_RANDMMAP) && !filp && (flags & MAP_STACK))
105663+ return ((prandom_u32() & 0xFF) + 1) << PAGE_SHIFT;
105664+
105665+ return 0;
105666+}
105667+#endif
105668+
105669+bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len, unsigned long offset)
105670+{
105671+ if (!vma) {
105672+#ifdef CONFIG_STACK_GROWSUP
105673+ if (addr > sysctl_heap_stack_gap)
105674+ vma = find_vma(current->mm, addr - sysctl_heap_stack_gap);
105675+ else
105676+ vma = find_vma(current->mm, 0);
105677+ if (vma && (vma->vm_flags & VM_GROWSUP))
105678+ return false;
105679+#endif
105680+ return true;
105681+ }
105682+
105683+ if (addr + len > vma->vm_start)
105684+ return false;
105685+
105686+ if (vma->vm_flags & VM_GROWSDOWN)
105687+ return sysctl_heap_stack_gap <= vma->vm_start - addr - len;
105688+#ifdef CONFIG_STACK_GROWSUP
105689+ else if (vma->vm_prev && (vma->vm_prev->vm_flags & VM_GROWSUP))
105690+ return addr - vma->vm_prev->vm_end >= sysctl_heap_stack_gap;
105691+#endif
105692+ else if (offset)
105693+ return offset <= vma->vm_start - addr - len;
105694+
105695+ return true;
105696+}
105697+
105698+unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len, unsigned long offset)
105699+{
105700+ if (vma->vm_start < len)
105701+ return -ENOMEM;
105702+
105703+ if (!(vma->vm_flags & VM_GROWSDOWN)) {
105704+ if (offset <= vma->vm_start - len)
105705+ return vma->vm_start - len - offset;
105706+ else
105707+ return -ENOMEM;
105708+ }
105709+
105710+ if (sysctl_heap_stack_gap <= vma->vm_start - len)
105711+ return vma->vm_start - len - sysctl_heap_stack_gap;
105712+ return -ENOMEM;
105713+}
105714+
105715+unsigned long unmapped_area(const struct vm_unmapped_area_info *info)
105716 {
105717 /*
105718 * We implement the search by looking for an rbtree node that
105719@@ -1738,11 +1968,29 @@ unsigned long unmapped_area(struct vm_unmapped_area_info *info)
105720 }
105721 }
105722
105723- gap_start = vma->vm_prev ? vma->vm_prev->vm_end : 0;
105724+ gap_start = vma->vm_prev ? vma->vm_prev->vm_end: 0;
105725 check_current:
105726 /* Check if current node has a suitable gap */
105727 if (gap_start > high_limit)
105728 return -ENOMEM;
105729+
105730+ if (gap_end - gap_start > info->threadstack_offset)
105731+ gap_start += info->threadstack_offset;
105732+ else
105733+ gap_start = gap_end;
105734+
105735+ if (vma->vm_prev && (vma->vm_prev->vm_flags & VM_GROWSUP)) {
105736+ if (gap_end - gap_start > sysctl_heap_stack_gap)
105737+ gap_start += sysctl_heap_stack_gap;
105738+ else
105739+ gap_start = gap_end;
105740+ }
105741+ if (vma->vm_flags & VM_GROWSDOWN) {
105742+ if (gap_end - gap_start > sysctl_heap_stack_gap)
105743+ gap_end -= sysctl_heap_stack_gap;
105744+ else
105745+ gap_end = gap_start;
105746+ }
105747 if (gap_end >= low_limit && gap_end - gap_start >= length)
105748 goto found;
105749
105750@@ -1792,7 +2040,7 @@ found:
105751 return gap_start;
105752 }
105753
105754-unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info)
105755+unsigned long unmapped_area_topdown(const struct vm_unmapped_area_info *info)
105756 {
105757 struct mm_struct *mm = current->mm;
105758 struct vm_area_struct *vma;
105759@@ -1846,6 +2094,24 @@ check_current:
105760 gap_end = vma->vm_start;
105761 if (gap_end < low_limit)
105762 return -ENOMEM;
105763+
105764+ if (gap_end - gap_start > info->threadstack_offset)
105765+ gap_end -= info->threadstack_offset;
105766+ else
105767+ gap_end = gap_start;
105768+
105769+ if (vma->vm_prev && (vma->vm_prev->vm_flags & VM_GROWSUP)) {
105770+ if (gap_end - gap_start > sysctl_heap_stack_gap)
105771+ gap_start += sysctl_heap_stack_gap;
105772+ else
105773+ gap_start = gap_end;
105774+ }
105775+ if (vma->vm_flags & VM_GROWSDOWN) {
105776+ if (gap_end - gap_start > sysctl_heap_stack_gap)
105777+ gap_end -= sysctl_heap_stack_gap;
105778+ else
105779+ gap_end = gap_start;
105780+ }
105781 if (gap_start <= high_limit && gap_end - gap_start >= length)
105782 goto found;
105783
105784@@ -1909,6 +2175,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
105785 struct mm_struct *mm = current->mm;
105786 struct vm_area_struct *vma;
105787 struct vm_unmapped_area_info info;
105788+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
105789
105790 if (len > TASK_SIZE - mmap_min_addr)
105791 return -ENOMEM;
105792@@ -1916,11 +2183,15 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
105793 if (flags & MAP_FIXED)
105794 return addr;
105795
105796+#ifdef CONFIG_PAX_RANDMMAP
105797+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
105798+#endif
105799+
105800 if (addr) {
105801 addr = PAGE_ALIGN(addr);
105802 vma = find_vma(mm, addr);
105803 if (TASK_SIZE - len >= addr && addr >= mmap_min_addr &&
105804- (!vma || addr + len <= vma->vm_start))
105805+ check_heap_stack_gap(vma, addr, len, offset))
105806 return addr;
105807 }
105808
105809@@ -1929,6 +2200,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
105810 info.low_limit = mm->mmap_base;
105811 info.high_limit = TASK_SIZE;
105812 info.align_mask = 0;
105813+ info.threadstack_offset = offset;
105814 return vm_unmapped_area(&info);
105815 }
105816 #endif
105817@@ -1947,6 +2219,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
105818 struct mm_struct *mm = current->mm;
105819 unsigned long addr = addr0;
105820 struct vm_unmapped_area_info info;
105821+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
105822
105823 /* requested length too big for entire address space */
105824 if (len > TASK_SIZE - mmap_min_addr)
105825@@ -1955,12 +2228,16 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
105826 if (flags & MAP_FIXED)
105827 return addr;
105828
105829+#ifdef CONFIG_PAX_RANDMMAP
105830+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
105831+#endif
105832+
105833 /* requesting a specific address */
105834 if (addr) {
105835 addr = PAGE_ALIGN(addr);
105836 vma = find_vma(mm, addr);
105837 if (TASK_SIZE - len >= addr && addr >= mmap_min_addr &&
105838- (!vma || addr + len <= vma->vm_start))
105839+ check_heap_stack_gap(vma, addr, len, offset))
105840 return addr;
105841 }
105842
105843@@ -1969,6 +2246,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
105844 info.low_limit = max(PAGE_SIZE, mmap_min_addr);
105845 info.high_limit = mm->mmap_base;
105846 info.align_mask = 0;
105847+ info.threadstack_offset = offset;
105848 addr = vm_unmapped_area(&info);
105849
105850 /*
105851@@ -1981,6 +2259,12 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
105852 VM_BUG_ON(addr != -ENOMEM);
105853 info.flags = 0;
105854 info.low_limit = TASK_UNMAPPED_BASE;
105855+
105856+#ifdef CONFIG_PAX_RANDMMAP
105857+ if (mm->pax_flags & MF_PAX_RANDMMAP)
105858+ info.low_limit += mm->delta_mmap;
105859+#endif
105860+
105861 info.high_limit = TASK_SIZE;
105862 addr = vm_unmapped_area(&info);
105863 }
105864@@ -2081,6 +2365,28 @@ find_vma_prev(struct mm_struct *mm, unsigned long addr,
105865 return vma;
105866 }
105867
105868+#ifdef CONFIG_PAX_SEGMEXEC
105869+struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma)
105870+{
105871+ struct vm_area_struct *vma_m;
105872+
105873+ BUG_ON(!vma || vma->vm_start >= vma->vm_end);
105874+ if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC)) {
105875+ BUG_ON(vma->vm_mirror);
105876+ return NULL;
105877+ }
105878+ BUG_ON(vma->vm_start < SEGMEXEC_TASK_SIZE && SEGMEXEC_TASK_SIZE < vma->vm_end);
105879+ vma_m = vma->vm_mirror;
105880+ BUG_ON(!vma_m || vma_m->vm_mirror != vma);
105881+ BUG_ON(vma->vm_file != vma_m->vm_file);
105882+ BUG_ON(vma->vm_end - vma->vm_start != vma_m->vm_end - vma_m->vm_start);
105883+ BUG_ON(vma->vm_pgoff != vma_m->vm_pgoff);
105884+ BUG_ON(vma->anon_vma != vma_m->anon_vma && vma->anon_vma->root != vma_m->anon_vma->root);
105885+ BUG_ON((vma->vm_flags ^ vma_m->vm_flags) & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED));
105886+ return vma_m;
105887+}
105888+#endif
105889+
105890 /*
105891 * Verify that the stack growth is acceptable and
105892 * update accounting. This is shared with both the
105893@@ -2098,8 +2404,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
105894
105895 /* Stack limit test */
105896 actual_size = size;
105897- if (size && (vma->vm_flags & (VM_GROWSUP | VM_GROWSDOWN)))
105898- actual_size -= PAGE_SIZE;
105899+ gr_learn_resource(current, RLIMIT_STACK, actual_size, 1);
105900 if (actual_size > ACCESS_ONCE(rlim[RLIMIT_STACK].rlim_cur))
105901 return -ENOMEM;
105902
105903@@ -2110,6 +2415,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
105904 locked = mm->locked_vm + grow;
105905 limit = ACCESS_ONCE(rlim[RLIMIT_MEMLOCK].rlim_cur);
105906 limit >>= PAGE_SHIFT;
105907+ gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
105908 if (locked > limit && !capable(CAP_IPC_LOCK))
105909 return -ENOMEM;
105910 }
105911@@ -2139,37 +2445,48 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
105912 * PA-RISC uses this for its stack; IA64 for its Register Backing Store.
105913 * vma is the last one with address > vma->vm_end. Have to extend vma.
105914 */
105915+#ifndef CONFIG_IA64
105916+static
105917+#endif
105918 int expand_upwards(struct vm_area_struct *vma, unsigned long address)
105919 {
105920 int error;
105921+ bool locknext;
105922
105923 if (!(vma->vm_flags & VM_GROWSUP))
105924 return -EFAULT;
105925
105926+ /* Also guard against wrapping around to address 0. */
105927+ if (address < PAGE_ALIGN(address+1))
105928+ address = PAGE_ALIGN(address+1);
105929+ else
105930+ return -ENOMEM;
105931+
105932 /*
105933 * We must make sure the anon_vma is allocated
105934 * so that the anon_vma locking is not a noop.
105935 */
105936 if (unlikely(anon_vma_prepare(vma)))
105937 return -ENOMEM;
105938+ locknext = vma->vm_next && (vma->vm_next->vm_flags & VM_GROWSDOWN);
105939+ if (locknext && anon_vma_prepare(vma->vm_next))
105940+ return -ENOMEM;
105941 vma_lock_anon_vma(vma);
105942+ if (locknext)
105943+ vma_lock_anon_vma(vma->vm_next);
105944
105945 /*
105946 * vma->vm_start/vm_end cannot change under us because the caller
105947 * is required to hold the mmap_sem in read mode. We need the
105948- * anon_vma lock to serialize against concurrent expand_stacks.
105949- * Also guard against wrapping around to address 0.
105950+ * anon_vma locks to serialize against concurrent expand_stacks
105951+ * and expand_upwards.
105952 */
105953- if (address < PAGE_ALIGN(address+4))
105954- address = PAGE_ALIGN(address+4);
105955- else {
105956- vma_unlock_anon_vma(vma);
105957- return -ENOMEM;
105958- }
105959 error = 0;
105960
105961 /* Somebody else might have raced and expanded it already */
105962- if (address > vma->vm_end) {
105963+ if (vma->vm_next && (vma->vm_next->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && vma->vm_next->vm_start - address < sysctl_heap_stack_gap)
105964+ error = -ENOMEM;
105965+ else if (address > vma->vm_end && (!locknext || vma->vm_next->vm_start >= address)) {
105966 unsigned long size, grow;
105967
105968 size = address - vma->vm_start;
105969@@ -2204,6 +2521,8 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
105970 }
105971 }
105972 }
105973+ if (locknext)
105974+ vma_unlock_anon_vma(vma->vm_next);
105975 vma_unlock_anon_vma(vma);
105976 khugepaged_enter_vma_merge(vma, vma->vm_flags);
105977 validate_mm(vma->vm_mm);
105978@@ -2218,6 +2537,8 @@ int expand_downwards(struct vm_area_struct *vma,
105979 unsigned long address)
105980 {
105981 int error;
105982+ bool lockprev = false;
105983+ struct vm_area_struct *prev;
105984
105985 /*
105986 * We must make sure the anon_vma is allocated
105987@@ -2231,6 +2552,15 @@ int expand_downwards(struct vm_area_struct *vma,
105988 if (error)
105989 return error;
105990
105991+ prev = vma->vm_prev;
105992+#if defined(CONFIG_STACK_GROWSUP) || defined(CONFIG_IA64)
105993+ lockprev = prev && (prev->vm_flags & VM_GROWSUP);
105994+#endif
105995+ if (lockprev && anon_vma_prepare(prev))
105996+ return -ENOMEM;
105997+ if (lockprev)
105998+ vma_lock_anon_vma(prev);
105999+
106000 vma_lock_anon_vma(vma);
106001
106002 /*
106003@@ -2240,9 +2570,17 @@ int expand_downwards(struct vm_area_struct *vma,
106004 */
106005
106006 /* Somebody else might have raced and expanded it already */
106007- if (address < vma->vm_start) {
106008+ if (prev && (prev->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && address - prev->vm_end < sysctl_heap_stack_gap)
106009+ error = -ENOMEM;
106010+ else if (address < vma->vm_start && (!lockprev || prev->vm_end <= address)) {
106011 unsigned long size, grow;
106012
106013+#ifdef CONFIG_PAX_SEGMEXEC
106014+ struct vm_area_struct *vma_m;
106015+
106016+ vma_m = pax_find_mirror_vma(vma);
106017+#endif
106018+
106019 size = vma->vm_end - address;
106020 grow = (vma->vm_start - address) >> PAGE_SHIFT;
106021
106022@@ -2267,13 +2605,27 @@ int expand_downwards(struct vm_area_struct *vma,
106023 vma->vm_pgoff -= grow;
106024 anon_vma_interval_tree_post_update_vma(vma);
106025 vma_gap_update(vma);
106026+
106027+#ifdef CONFIG_PAX_SEGMEXEC
106028+ if (vma_m) {
106029+ anon_vma_interval_tree_pre_update_vma(vma_m);
106030+ vma_m->vm_start -= grow << PAGE_SHIFT;
106031+ vma_m->vm_pgoff -= grow;
106032+ anon_vma_interval_tree_post_update_vma(vma_m);
106033+ vma_gap_update(vma_m);
106034+ }
106035+#endif
106036+
106037 spin_unlock(&vma->vm_mm->page_table_lock);
106038
106039+ track_exec_limit(vma->vm_mm, vma->vm_start, vma->vm_end, vma->vm_flags);
106040 perf_event_mmap(vma);
106041 }
106042 }
106043 }
106044 vma_unlock_anon_vma(vma);
106045+ if (lockprev)
106046+ vma_unlock_anon_vma(prev);
106047 khugepaged_enter_vma_merge(vma, vma->vm_flags);
106048 validate_mm(vma->vm_mm);
106049 return error;
106050@@ -2373,6 +2725,13 @@ static void remove_vma_list(struct mm_struct *mm, struct vm_area_struct *vma)
106051 do {
106052 long nrpages = vma_pages(vma);
106053
106054+#ifdef CONFIG_PAX_SEGMEXEC
106055+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE)) {
106056+ vma = remove_vma(vma);
106057+ continue;
106058+ }
106059+#endif
106060+
106061 if (vma->vm_flags & VM_ACCOUNT)
106062 nr_accounted += nrpages;
106063 vm_stat_account(mm, vma->vm_flags, vma->vm_file, -nrpages);
106064@@ -2417,6 +2776,16 @@ detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma,
106065 insertion_point = (prev ? &prev->vm_next : &mm->mmap);
106066 vma->vm_prev = NULL;
106067 do {
106068+
106069+#ifdef CONFIG_PAX_SEGMEXEC
106070+ if (vma->vm_mirror) {
106071+ BUG_ON(!vma->vm_mirror->vm_mirror || vma->vm_mirror->vm_mirror != vma);
106072+ vma->vm_mirror->vm_mirror = NULL;
106073+ vma->vm_mirror->vm_flags &= ~VM_EXEC;
106074+ vma->vm_mirror = NULL;
106075+ }
106076+#endif
106077+
106078 vma_rb_erase(vma, &mm->mm_rb);
106079 mm->map_count--;
106080 tail_vma = vma;
106081@@ -2444,14 +2813,33 @@ static int __split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
106082 struct vm_area_struct *new;
106083 int err = -ENOMEM;
106084
106085+#ifdef CONFIG_PAX_SEGMEXEC
106086+ struct vm_area_struct *vma_m, *new_m = NULL;
106087+ unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE;
106088+#endif
106089+
106090 if (is_vm_hugetlb_page(vma) && (addr &
106091 ~(huge_page_mask(hstate_vma(vma)))))
106092 return -EINVAL;
106093
106094+#ifdef CONFIG_PAX_SEGMEXEC
106095+ vma_m = pax_find_mirror_vma(vma);
106096+#endif
106097+
106098 new = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
106099 if (!new)
106100 goto out_err;
106101
106102+#ifdef CONFIG_PAX_SEGMEXEC
106103+ if (vma_m) {
106104+ new_m = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
106105+ if (!new_m) {
106106+ kmem_cache_free(vm_area_cachep, new);
106107+ goto out_err;
106108+ }
106109+ }
106110+#endif
106111+
106112 /* most fields are the same, copy all, and then fixup */
106113 *new = *vma;
106114
106115@@ -2464,6 +2852,22 @@ static int __split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
106116 new->vm_pgoff += ((addr - vma->vm_start) >> PAGE_SHIFT);
106117 }
106118
106119+#ifdef CONFIG_PAX_SEGMEXEC
106120+ if (vma_m) {
106121+ *new_m = *vma_m;
106122+ INIT_LIST_HEAD(&new_m->anon_vma_chain);
106123+ new_m->vm_mirror = new;
106124+ new->vm_mirror = new_m;
106125+
106126+ if (new_below)
106127+ new_m->vm_end = addr_m;
106128+ else {
106129+ new_m->vm_start = addr_m;
106130+ new_m->vm_pgoff += ((addr_m - vma_m->vm_start) >> PAGE_SHIFT);
106131+ }
106132+ }
106133+#endif
106134+
106135 err = vma_dup_policy(vma, new);
106136 if (err)
106137 goto out_free_vma;
106138@@ -2484,6 +2888,38 @@ static int __split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
106139 else
106140 err = vma_adjust(vma, vma->vm_start, addr, vma->vm_pgoff, new);
106141
106142+#ifdef CONFIG_PAX_SEGMEXEC
106143+ if (!err && vma_m) {
106144+ struct mempolicy *pol = vma_policy(new);
106145+
106146+ if (anon_vma_clone(new_m, vma_m))
106147+ goto out_free_mpol;
106148+
106149+ mpol_get(pol);
106150+ set_vma_policy(new_m, pol);
106151+
106152+ if (new_m->vm_file)
106153+ get_file(new_m->vm_file);
106154+
106155+ if (new_m->vm_ops && new_m->vm_ops->open)
106156+ new_m->vm_ops->open(new_m);
106157+
106158+ if (new_below)
106159+ err = vma_adjust(vma_m, addr_m, vma_m->vm_end, vma_m->vm_pgoff +
106160+ ((addr_m - new_m->vm_start) >> PAGE_SHIFT), new_m);
106161+ else
106162+ err = vma_adjust(vma_m, vma_m->vm_start, addr_m, vma_m->vm_pgoff, new_m);
106163+
106164+ if (err) {
106165+ if (new_m->vm_ops && new_m->vm_ops->close)
106166+ new_m->vm_ops->close(new_m);
106167+ if (new_m->vm_file)
106168+ fput(new_m->vm_file);
106169+ mpol_put(pol);
106170+ }
106171+ }
106172+#endif
106173+
106174 /* Success. */
106175 if (!err)
106176 return 0;
106177@@ -2493,10 +2929,18 @@ static int __split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
106178 new->vm_ops->close(new);
106179 if (new->vm_file)
106180 fput(new->vm_file);
106181- unlink_anon_vmas(new);
106182 out_free_mpol:
106183 mpol_put(vma_policy(new));
106184 out_free_vma:
106185+
106186+#ifdef CONFIG_PAX_SEGMEXEC
106187+ if (new_m) {
106188+ unlink_anon_vmas(new_m);
106189+ kmem_cache_free(vm_area_cachep, new_m);
106190+ }
106191+#endif
106192+
106193+ unlink_anon_vmas(new);
106194 kmem_cache_free(vm_area_cachep, new);
106195 out_err:
106196 return err;
106197@@ -2509,6 +2953,15 @@ static int __split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
106198 int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
106199 unsigned long addr, int new_below)
106200 {
106201+
106202+#ifdef CONFIG_PAX_SEGMEXEC
106203+ if (mm->pax_flags & MF_PAX_SEGMEXEC) {
106204+ BUG_ON(vma->vm_end > SEGMEXEC_TASK_SIZE);
106205+ if (mm->map_count >= sysctl_max_map_count-1)
106206+ return -ENOMEM;
106207+ } else
106208+#endif
106209+
106210 if (mm->map_count >= sysctl_max_map_count)
106211 return -ENOMEM;
106212
106213@@ -2520,11 +2973,30 @@ int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
106214 * work. This now handles partial unmappings.
106215 * Jeremy Fitzhardinge <jeremy@goop.org>
106216 */
106217+#ifdef CONFIG_PAX_SEGMEXEC
106218 int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
106219 {
106220+ int ret = __do_munmap(mm, start, len);
106221+ if (ret || !(mm->pax_flags & MF_PAX_SEGMEXEC))
106222+ return ret;
106223+
106224+ return __do_munmap(mm, start + SEGMEXEC_TASK_SIZE, len);
106225+}
106226+
106227+int __do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
106228+#else
106229+int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
106230+#endif
106231+{
106232 unsigned long end;
106233 struct vm_area_struct *vma, *prev, *last;
106234
106235+ /*
106236+ * mm->mmap_sem is required to protect against another thread
106237+ * changing the mappings in case we sleep.
106238+ */
106239+ verify_mm_writelocked(mm);
106240+
106241 if ((start & ~PAGE_MASK) || start > TASK_SIZE || len > TASK_SIZE-start)
106242 return -EINVAL;
106243
106244@@ -2602,6 +3074,8 @@ int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
106245 /* Fix up all other VM information */
106246 remove_vma_list(mm, vma);
106247
106248+ track_exec_limit(mm, start, end, 0UL);
106249+
106250 return 0;
106251 }
106252
106253@@ -2610,6 +3084,13 @@ int vm_munmap(unsigned long start, size_t len)
106254 int ret;
106255 struct mm_struct *mm = current->mm;
106256
106257+
106258+#ifdef CONFIG_PAX_SEGMEXEC
106259+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) &&
106260+ (len > SEGMEXEC_TASK_SIZE || start > SEGMEXEC_TASK_SIZE-len))
106261+ return -EINVAL;
106262+#endif
106263+
106264 down_write(&mm->mmap_sem);
106265 ret = do_munmap(mm, start, len);
106266 up_write(&mm->mmap_sem);
106267@@ -2656,6 +3137,11 @@ SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size,
106268 down_write(&mm->mmap_sem);
106269 vma = find_vma(mm, start);
106270
106271+#ifdef CONFIG_PAX_SEGMEXEC
106272+ if (vma && (mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_MAYEXEC))
106273+ goto out;
106274+#endif
106275+
106276 if (!vma || !(vma->vm_flags & VM_SHARED))
106277 goto out;
106278
106279@@ -2692,16 +3178,6 @@ out:
106280 return ret;
106281 }
106282
106283-static inline void verify_mm_writelocked(struct mm_struct *mm)
106284-{
106285-#ifdef CONFIG_DEBUG_VM
106286- if (unlikely(down_read_trylock(&mm->mmap_sem))) {
106287- WARN_ON(1);
106288- up_read(&mm->mmap_sem);
106289- }
106290-#endif
106291-}
106292-
106293 /*
106294 * this is really a simplified "do_mmap". it only handles
106295 * anonymous maps. eventually we may be able to do some
106296@@ -2715,6 +3191,7 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
106297 struct rb_node **rb_link, *rb_parent;
106298 pgoff_t pgoff = addr >> PAGE_SHIFT;
106299 int error;
106300+ unsigned long charged;
106301
106302 len = PAGE_ALIGN(len);
106303 if (!len)
106304@@ -2722,10 +3199,24 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
106305
106306 flags = VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags;
106307
106308+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
106309+ if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
106310+ flags &= ~VM_EXEC;
106311+
106312+#ifdef CONFIG_PAX_MPROTECT
106313+ if (mm->pax_flags & MF_PAX_MPROTECT)
106314+ flags &= ~VM_MAYEXEC;
106315+#endif
106316+
106317+ }
106318+#endif
106319+
106320 error = get_unmapped_area(NULL, addr, len, 0, MAP_FIXED);
106321 if (error & ~PAGE_MASK)
106322 return error;
106323
106324+ charged = len >> PAGE_SHIFT;
106325+
106326 error = mlock_future_check(mm, mm->def_flags, len);
106327 if (error)
106328 return error;
106329@@ -2739,21 +3230,20 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
106330 /*
106331 * Clear old maps. this also does some error checking for us
106332 */
106333- munmap_back:
106334 if (find_vma_links(mm, addr, addr + len, &prev, &rb_link, &rb_parent)) {
106335 if (do_munmap(mm, addr, len))
106336 return -ENOMEM;
106337- goto munmap_back;
106338+ BUG_ON(find_vma_links(mm, addr, addr + len, &prev, &rb_link, &rb_parent));
106339 }
106340
106341 /* Check against address space limits *after* clearing old maps... */
106342- if (!may_expand_vm(mm, len >> PAGE_SHIFT))
106343+ if (!may_expand_vm(mm, charged))
106344 return -ENOMEM;
106345
106346 if (mm->map_count > sysctl_max_map_count)
106347 return -ENOMEM;
106348
106349- if (security_vm_enough_memory_mm(mm, len >> PAGE_SHIFT))
106350+ if (security_vm_enough_memory_mm(mm, charged))
106351 return -ENOMEM;
106352
106353 /* Can we just expand an old private anonymous mapping? */
106354@@ -2767,7 +3257,7 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
106355 */
106356 vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
106357 if (!vma) {
106358- vm_unacct_memory(len >> PAGE_SHIFT);
106359+ vm_unacct_memory(charged);
106360 return -ENOMEM;
106361 }
106362
106363@@ -2781,10 +3271,11 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
106364 vma_link(mm, vma, prev, rb_link, rb_parent);
106365 out:
106366 perf_event_mmap(vma);
106367- mm->total_vm += len >> PAGE_SHIFT;
106368+ mm->total_vm += charged;
106369 if (flags & VM_LOCKED)
106370- mm->locked_vm += (len >> PAGE_SHIFT);
106371+ mm->locked_vm += charged;
106372 vma->vm_flags |= VM_SOFTDIRTY;
106373+ track_exec_limit(mm, addr, addr + len, flags);
106374 return addr;
106375 }
106376
106377@@ -2846,6 +3337,7 @@ void exit_mmap(struct mm_struct *mm)
106378 while (vma) {
106379 if (vma->vm_flags & VM_ACCOUNT)
106380 nr_accounted += vma_pages(vma);
106381+ vma->vm_mirror = NULL;
106382 vma = remove_vma(vma);
106383 }
106384 vm_unacct_memory(nr_accounted);
106385@@ -2860,6 +3352,13 @@ int insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma)
106386 struct vm_area_struct *prev;
106387 struct rb_node **rb_link, *rb_parent;
106388
106389+#ifdef CONFIG_PAX_SEGMEXEC
106390+ struct vm_area_struct *vma_m = NULL;
106391+#endif
106392+
106393+ if (security_mmap_addr(vma->vm_start))
106394+ return -EPERM;
106395+
106396 /*
106397 * The vm_pgoff of a purely anonymous vma should be irrelevant
106398 * until its first write fault, when page's anon_vma and index
106399@@ -2883,7 +3382,21 @@ int insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma)
106400 security_vm_enough_memory_mm(mm, vma_pages(vma)))
106401 return -ENOMEM;
106402
106403+#ifdef CONFIG_PAX_SEGMEXEC
106404+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_EXEC)) {
106405+ vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
106406+ if (!vma_m)
106407+ return -ENOMEM;
106408+ }
106409+#endif
106410+
106411 vma_link(mm, vma, prev, rb_link, rb_parent);
106412+
106413+#ifdef CONFIG_PAX_SEGMEXEC
106414+ if (vma_m)
106415+ BUG_ON(pax_mirror_vma(vma_m, vma));
106416+#endif
106417+
106418 return 0;
106419 }
106420
106421@@ -2902,6 +3415,8 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
106422 struct rb_node **rb_link, *rb_parent;
106423 bool faulted_in_anon_vma = true;
106424
106425+ BUG_ON(vma->vm_mirror);
106426+
106427 /*
106428 * If anonymous vma has not yet been faulted, update new pgoff
106429 * to match new location, to increase its chance of merging.
106430@@ -2966,6 +3481,39 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
106431 return NULL;
106432 }
106433
106434+#ifdef CONFIG_PAX_SEGMEXEC
106435+long pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma)
106436+{
106437+ struct vm_area_struct *prev_m;
106438+ struct rb_node **rb_link_m, *rb_parent_m;
106439+ struct mempolicy *pol_m;
106440+
106441+ BUG_ON(!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC));
106442+ BUG_ON(vma->vm_mirror || vma_m->vm_mirror);
106443+ BUG_ON(!mpol_equal(vma_policy(vma), vma_policy(vma_m)));
106444+ *vma_m = *vma;
106445+ INIT_LIST_HEAD(&vma_m->anon_vma_chain);
106446+ if (anon_vma_clone(vma_m, vma))
106447+ return -ENOMEM;
106448+ pol_m = vma_policy(vma_m);
106449+ mpol_get(pol_m);
106450+ set_vma_policy(vma_m, pol_m);
106451+ vma_m->vm_start += SEGMEXEC_TASK_SIZE;
106452+ vma_m->vm_end += SEGMEXEC_TASK_SIZE;
106453+ vma_m->vm_flags &= ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED);
106454+ vma_m->vm_page_prot = vm_get_page_prot(vma_m->vm_flags);
106455+ if (vma_m->vm_file)
106456+ get_file(vma_m->vm_file);
106457+ if (vma_m->vm_ops && vma_m->vm_ops->open)
106458+ vma_m->vm_ops->open(vma_m);
106459+ BUG_ON(find_vma_links(vma->vm_mm, vma_m->vm_start, vma_m->vm_end, &prev_m, &rb_link_m, &rb_parent_m));
106460+ vma_link(vma->vm_mm, vma_m, prev_m, rb_link_m, rb_parent_m);
106461+ vma_m->vm_mirror = vma;
106462+ vma->vm_mirror = vma_m;
106463+ return 0;
106464+}
106465+#endif
106466+
106467 /*
106468 * Return true if the calling process may expand its vm space by the passed
106469 * number of pages
106470@@ -2977,6 +3525,7 @@ int may_expand_vm(struct mm_struct *mm, unsigned long npages)
106471
106472 lim = rlimit(RLIMIT_AS) >> PAGE_SHIFT;
106473
106474+ gr_learn_resource(current, RLIMIT_AS, (cur + npages) << PAGE_SHIFT, 1);
106475 if (cur + npages > lim)
106476 return 0;
106477 return 1;
106478@@ -3059,6 +3608,22 @@ static struct vm_area_struct *__install_special_mapping(
106479 vma->vm_start = addr;
106480 vma->vm_end = addr + len;
106481
106482+#ifdef CONFIG_PAX_MPROTECT
106483+ if (mm->pax_flags & MF_PAX_MPROTECT) {
106484+#ifndef CONFIG_PAX_MPROTECT_COMPAT
106485+ if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC))
106486+ return ERR_PTR(-EPERM);
106487+ if (!(vm_flags & VM_EXEC))
106488+ vm_flags &= ~VM_MAYEXEC;
106489+#else
106490+ if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
106491+ vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
106492+#endif
106493+ else
106494+ vm_flags &= ~VM_MAYWRITE;
106495+ }
106496+#endif
106497+
106498 vma->vm_flags = vm_flags | mm->def_flags | VM_DONTEXPAND | VM_SOFTDIRTY;
106499 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
106500
106501diff --git a/mm/mprotect.c b/mm/mprotect.c
106502index 8858483..8145fa5 100644
106503--- a/mm/mprotect.c
106504+++ b/mm/mprotect.c
106505@@ -24,10 +24,18 @@
106506 #include <linux/migrate.h>
106507 #include <linux/perf_event.h>
106508 #include <linux/ksm.h>
106509+#include <linux/sched/sysctl.h>
106510+
106511+#ifdef CONFIG_PAX_MPROTECT
106512+#include <linux/elf.h>
106513+#include <linux/binfmts.h>
106514+#endif
106515+
106516 #include <asm/uaccess.h>
106517 #include <asm/pgtable.h>
106518 #include <asm/cacheflush.h>
106519 #include <asm/tlbflush.h>
106520+#include <asm/mmu_context.h>
106521
106522 /*
106523 * For a prot_numa update we only hold mmap_sem for read so there is a
106524@@ -252,6 +260,48 @@ unsigned long change_protection(struct vm_area_struct *vma, unsigned long start,
106525 return pages;
106526 }
106527
106528+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
106529+/* called while holding the mmap semaphor for writing except stack expansion */
106530+void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot)
106531+{
106532+ unsigned long oldlimit, newlimit = 0UL;
106533+
106534+ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || (__supported_pte_mask & _PAGE_NX))
106535+ return;
106536+
106537+ spin_lock(&mm->page_table_lock);
106538+ oldlimit = mm->context.user_cs_limit;
106539+ if ((prot & VM_EXEC) && oldlimit < end)
106540+ /* USER_CS limit moved up */
106541+ newlimit = end;
106542+ else if (!(prot & VM_EXEC) && start < oldlimit && oldlimit <= end)
106543+ /* USER_CS limit moved down */
106544+ newlimit = start;
106545+
106546+ if (newlimit) {
106547+ mm->context.user_cs_limit = newlimit;
106548+
106549+#ifdef CONFIG_SMP
106550+ wmb();
106551+ cpus_clear(mm->context.cpu_user_cs_mask);
106552+ cpu_set(smp_processor_id(), mm->context.cpu_user_cs_mask);
106553+#endif
106554+
106555+ set_user_cs(mm->context.user_cs_base, mm->context.user_cs_limit, smp_processor_id());
106556+ }
106557+ spin_unlock(&mm->page_table_lock);
106558+ if (newlimit == end) {
106559+ struct vm_area_struct *vma = find_vma(mm, oldlimit);
106560+
106561+ for (; vma && vma->vm_start < end; vma = vma->vm_next)
106562+ if (is_vm_hugetlb_page(vma))
106563+ hugetlb_change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot);
106564+ else
106565+ change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot, vma_wants_writenotify(vma), 0);
106566+ }
106567+}
106568+#endif
106569+
106570 int
106571 mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
106572 unsigned long start, unsigned long end, unsigned long newflags)
106573@@ -264,11 +314,29 @@ mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
106574 int error;
106575 int dirty_accountable = 0;
106576
106577+#ifdef CONFIG_PAX_SEGMEXEC
106578+ struct vm_area_struct *vma_m = NULL;
106579+ unsigned long start_m, end_m;
106580+
106581+ start_m = start + SEGMEXEC_TASK_SIZE;
106582+ end_m = end + SEGMEXEC_TASK_SIZE;
106583+#endif
106584+
106585 if (newflags == oldflags) {
106586 *pprev = vma;
106587 return 0;
106588 }
106589
106590+ if (newflags & (VM_READ | VM_WRITE | VM_EXEC)) {
106591+ struct vm_area_struct *prev = vma->vm_prev, *next = vma->vm_next;
106592+
106593+ if (next && (next->vm_flags & VM_GROWSDOWN) && sysctl_heap_stack_gap > next->vm_start - end)
106594+ return -ENOMEM;
106595+
106596+ if (prev && (prev->vm_flags & VM_GROWSUP) && sysctl_heap_stack_gap > start - prev->vm_end)
106597+ return -ENOMEM;
106598+ }
106599+
106600 /*
106601 * If we make a private mapping writable we increase our commit;
106602 * but (without finer accounting) cannot reduce our commit if we
106603@@ -285,6 +353,42 @@ mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
106604 }
106605 }
106606
106607+#ifdef CONFIG_PAX_SEGMEXEC
106608+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && ((oldflags ^ newflags) & VM_EXEC)) {
106609+ if (start != vma->vm_start) {
106610+ error = split_vma(mm, vma, start, 1);
106611+ if (error)
106612+ goto fail;
106613+ BUG_ON(!*pprev || (*pprev)->vm_next == vma);
106614+ *pprev = (*pprev)->vm_next;
106615+ }
106616+
106617+ if (end != vma->vm_end) {
106618+ error = split_vma(mm, vma, end, 0);
106619+ if (error)
106620+ goto fail;
106621+ }
106622+
106623+ if (pax_find_mirror_vma(vma)) {
106624+ error = __do_munmap(mm, start_m, end_m - start_m);
106625+ if (error)
106626+ goto fail;
106627+ } else {
106628+ vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
106629+ if (!vma_m) {
106630+ error = -ENOMEM;
106631+ goto fail;
106632+ }
106633+ vma->vm_flags = newflags;
106634+ error = pax_mirror_vma(vma_m, vma);
106635+ if (error) {
106636+ vma->vm_flags = oldflags;
106637+ goto fail;
106638+ }
106639+ }
106640+ }
106641+#endif
106642+
106643 /*
106644 * First try to merge with previous and/or next vma.
106645 */
106646@@ -315,7 +419,19 @@ success:
106647 * vm_flags and vm_page_prot are protected by the mmap_sem
106648 * held in write mode.
106649 */
106650+
106651+#ifdef CONFIG_PAX_SEGMEXEC
106652+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (newflags & VM_EXEC) && ((vma->vm_flags ^ newflags) & VM_READ))
106653+ pax_find_mirror_vma(vma)->vm_flags ^= VM_READ;
106654+#endif
106655+
106656 vma->vm_flags = newflags;
106657+
106658+#ifdef CONFIG_PAX_MPROTECT
106659+ if (mm->binfmt && mm->binfmt->handle_mprotect)
106660+ mm->binfmt->handle_mprotect(vma, newflags);
106661+#endif
106662+
106663 dirty_accountable = vma_wants_writenotify(vma);
106664 vma_set_page_prot(vma);
106665
106666@@ -351,6 +467,17 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
106667 end = start + len;
106668 if (end <= start)
106669 return -ENOMEM;
106670+
106671+#ifdef CONFIG_PAX_SEGMEXEC
106672+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
106673+ if (end > SEGMEXEC_TASK_SIZE)
106674+ return -EINVAL;
106675+ } else
106676+#endif
106677+
106678+ if (end > TASK_SIZE)
106679+ return -EINVAL;
106680+
106681 if (!arch_validate_prot(prot))
106682 return -EINVAL;
106683
106684@@ -358,7 +485,7 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
106685 /*
106686 * Does the application expect PROT_READ to imply PROT_EXEC:
106687 */
106688- if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
106689+ if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
106690 prot |= PROT_EXEC;
106691
106692 vm_flags = calc_vm_prot_bits(prot);
106693@@ -390,6 +517,11 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
106694 if (start > vma->vm_start)
106695 prev = vma;
106696
106697+#ifdef CONFIG_PAX_MPROTECT
106698+ if (current->mm->binfmt && current->mm->binfmt->handle_mprotect)
106699+ current->mm->binfmt->handle_mprotect(vma, vm_flags);
106700+#endif
106701+
106702 for (nstart = start ; ; ) {
106703 unsigned long newflags;
106704
106705@@ -400,6 +532,14 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
106706
106707 /* newflags >> 4 shift VM_MAY% in place of VM_% */
106708 if ((newflags & ~(newflags >> 4)) & (VM_READ | VM_WRITE | VM_EXEC)) {
106709+ if (prot & (PROT_WRITE | PROT_EXEC))
106710+ gr_log_rwxmprotect(vma);
106711+
106712+ error = -EACCES;
106713+ goto out;
106714+ }
106715+
106716+ if (!gr_acl_handle_mprotect(vma->vm_file, prot)) {
106717 error = -EACCES;
106718 goto out;
106719 }
106720@@ -414,6 +554,9 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
106721 error = mprotect_fixup(vma, &prev, nstart, tmp, newflags);
106722 if (error)
106723 goto out;
106724+
106725+ track_exec_limit(current->mm, nstart, tmp, vm_flags);
106726+
106727 nstart = tmp;
106728
106729 if (nstart < prev->vm_end)
106730diff --git a/mm/mremap.c b/mm/mremap.c
106731index 2dc44b1..caa1819 100644
106732--- a/mm/mremap.c
106733+++ b/mm/mremap.c
106734@@ -142,6 +142,12 @@ static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
106735 continue;
106736 pte = ptep_get_and_clear(mm, old_addr, old_pte);
106737 pte = move_pte(pte, new_vma->vm_page_prot, old_addr, new_addr);
106738+
106739+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
106740+ if (!(__supported_pte_mask & _PAGE_NX) && pte_present(pte) && (new_vma->vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC)
106741+ pte = pte_exprotect(pte);
106742+#endif
106743+
106744 pte = move_soft_dirty_pte(pte);
106745 set_pte_at(mm, new_addr, new_pte, pte);
106746 }
106747@@ -350,6 +356,11 @@ static struct vm_area_struct *vma_to_resize(unsigned long addr,
106748 if (is_vm_hugetlb_page(vma))
106749 goto Einval;
106750
106751+#ifdef CONFIG_PAX_SEGMEXEC
106752+ if (pax_find_mirror_vma(vma))
106753+ goto Einval;
106754+#endif
106755+
106756 /* We can't remap across vm area boundaries */
106757 if (old_len > vma->vm_end - addr)
106758 goto Efault;
106759@@ -405,20 +416,25 @@ static unsigned long mremap_to(unsigned long addr, unsigned long old_len,
106760 unsigned long ret = -EINVAL;
106761 unsigned long charged = 0;
106762 unsigned long map_flags;
106763+ unsigned long pax_task_size = TASK_SIZE;
106764
106765 if (new_addr & ~PAGE_MASK)
106766 goto out;
106767
106768- if (new_len > TASK_SIZE || new_addr > TASK_SIZE - new_len)
106769+#ifdef CONFIG_PAX_SEGMEXEC
106770+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
106771+ pax_task_size = SEGMEXEC_TASK_SIZE;
106772+#endif
106773+
106774+ pax_task_size -= PAGE_SIZE;
106775+
106776+ if (new_len > TASK_SIZE || new_addr > pax_task_size - new_len)
106777 goto out;
106778
106779 /* Check if the location we're moving into overlaps the
106780 * old location at all, and fail if it does.
106781 */
106782- if ((new_addr <= addr) && (new_addr+new_len) > addr)
106783- goto out;
106784-
106785- if ((addr <= new_addr) && (addr+old_len) > new_addr)
106786+ if (addr + old_len > new_addr && new_addr + new_len > addr)
106787 goto out;
106788
106789 ret = do_munmap(mm, new_addr, new_len);
106790@@ -487,6 +503,7 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
106791 unsigned long ret = -EINVAL;
106792 unsigned long charged = 0;
106793 bool locked = false;
106794+ unsigned long pax_task_size = TASK_SIZE;
106795
106796 if (flags & ~(MREMAP_FIXED | MREMAP_MAYMOVE))
106797 return ret;
106798@@ -508,6 +525,17 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
106799 if (!new_len)
106800 return ret;
106801
106802+#ifdef CONFIG_PAX_SEGMEXEC
106803+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
106804+ pax_task_size = SEGMEXEC_TASK_SIZE;
106805+#endif
106806+
106807+ pax_task_size -= PAGE_SIZE;
106808+
106809+ if (new_len > pax_task_size || addr > pax_task_size-new_len ||
106810+ old_len > pax_task_size || addr > pax_task_size-old_len)
106811+ return ret;
106812+
106813 down_write(&current->mm->mmap_sem);
106814
106815 if (flags & MREMAP_FIXED) {
106816@@ -558,6 +586,7 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
106817 new_addr = addr;
106818 }
106819 ret = addr;
106820+ track_exec_limit(vma->vm_mm, vma->vm_start, addr + new_len, vma->vm_flags);
106821 goto out;
106822 }
106823 }
106824@@ -581,7 +610,12 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
106825 goto out;
106826 }
106827
106828+ map_flags = vma->vm_flags;
106829 ret = move_vma(vma, addr, old_len, new_len, new_addr, &locked);
106830+ if (!(ret & ~PAGE_MASK)) {
106831+ track_exec_limit(current->mm, addr, addr + old_len, 0UL);
106832+ track_exec_limit(current->mm, new_addr, new_addr + new_len, map_flags);
106833+ }
106834 }
106835 out:
106836 if (ret & ~PAGE_MASK)
106837diff --git a/mm/nommu.c b/mm/nommu.c
106838index 3fba2dc..fdad748 100644
106839--- a/mm/nommu.c
106840+++ b/mm/nommu.c
106841@@ -72,7 +72,6 @@ int sysctl_max_map_count = DEFAULT_MAX_MAP_COUNT;
106842 int sysctl_nr_trim_pages = CONFIG_NOMMU_INITIAL_TRIM_EXCESS;
106843 unsigned long sysctl_user_reserve_kbytes __read_mostly = 1UL << 17; /* 128MB */
106844 unsigned long sysctl_admin_reserve_kbytes __read_mostly = 1UL << 13; /* 8MB */
106845-int heap_stack_gap = 0;
106846
106847 atomic_long_t mmap_pages_allocated;
106848
106849@@ -892,15 +891,6 @@ struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
106850 EXPORT_SYMBOL(find_vma);
106851
106852 /*
106853- * find a VMA
106854- * - we don't extend stack VMAs under NOMMU conditions
106855- */
106856-struct vm_area_struct *find_extend_vma(struct mm_struct *mm, unsigned long addr)
106857-{
106858- return find_vma(mm, addr);
106859-}
106860-
106861-/*
106862 * expand a stack to a given address
106863 * - not supported under NOMMU conditions
106864 */
106865@@ -1585,6 +1575,7 @@ int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
106866
106867 /* most fields are the same, copy all, and then fixup */
106868 *new = *vma;
106869+ INIT_LIST_HEAD(&new->anon_vma_chain);
106870 *region = *vma->vm_region;
106871 new->vm_region = region;
106872
106873@@ -2007,8 +1998,8 @@ void filemap_map_pages(struct vm_area_struct *vma, struct vm_fault *vmf)
106874 }
106875 EXPORT_SYMBOL(filemap_map_pages);
106876
106877-static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
106878- unsigned long addr, void *buf, int len, int write)
106879+static ssize_t __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
106880+ unsigned long addr, void *buf, size_t len, int write)
106881 {
106882 struct vm_area_struct *vma;
106883
106884@@ -2049,8 +2040,8 @@ static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
106885 *
106886 * The caller must hold a reference on @mm.
106887 */
106888-int access_remote_vm(struct mm_struct *mm, unsigned long addr,
106889- void *buf, int len, int write)
106890+ssize_t access_remote_vm(struct mm_struct *mm, unsigned long addr,
106891+ void *buf, size_t len, int write)
106892 {
106893 return __access_remote_vm(NULL, mm, addr, buf, len, write);
106894 }
106895@@ -2059,7 +2050,7 @@ int access_remote_vm(struct mm_struct *mm, unsigned long addr,
106896 * Access another process' address space.
106897 * - source/target buffer must be kernel space
106898 */
106899-int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write)
106900+ssize_t access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, size_t len, int write)
106901 {
106902 struct mm_struct *mm;
106903
106904diff --git a/mm/page-writeback.c b/mm/page-writeback.c
106905index ad05f2f..cee723a 100644
106906--- a/mm/page-writeback.c
106907+++ b/mm/page-writeback.c
106908@@ -664,7 +664,7 @@ static long long pos_ratio_polynom(unsigned long setpoint,
106909 * card's bdi_dirty may rush to many times higher than bdi_setpoint.
106910 * - the bdi dirty thresh drops quickly due to change of JBOD workload
106911 */
106912-static unsigned long bdi_position_ratio(struct backing_dev_info *bdi,
106913+static unsigned long __intentional_overflow(-1) bdi_position_ratio(struct backing_dev_info *bdi,
106914 unsigned long thresh,
106915 unsigned long bg_thresh,
106916 unsigned long dirty,
106917diff --git a/mm/page_alloc.c b/mm/page_alloc.c
106918index 40e2942..0eb29a2 100644
106919--- a/mm/page_alloc.c
106920+++ b/mm/page_alloc.c
106921@@ -61,6 +61,7 @@
106922 #include <linux/hugetlb.h>
106923 #include <linux/sched/rt.h>
106924 #include <linux/page_owner.h>
106925+#include <linux/random.h>
106926
106927 #include <asm/sections.h>
106928 #include <asm/tlbflush.h>
106929@@ -357,7 +358,7 @@ out:
106930 * This usage means that zero-order pages may not be compound.
106931 */
106932
106933-static void free_compound_page(struct page *page)
106934+void free_compound_page(struct page *page)
106935 {
106936 __free_pages_ok(page, compound_order(page));
106937 }
106938@@ -480,7 +481,7 @@ static inline void clear_page_guard(struct zone *zone, struct page *page,
106939 __mod_zone_freepage_state(zone, (1 << order), migratetype);
106940 }
106941 #else
106942-struct page_ext_operations debug_guardpage_ops = { NULL, };
106943+struct page_ext_operations debug_guardpage_ops = { .need = NULL, .init = NULL };
106944 static inline void set_page_guard(struct zone *zone, struct page *page,
106945 unsigned int order, int migratetype) {}
106946 static inline void clear_page_guard(struct zone *zone, struct page *page,
106947@@ -783,6 +784,10 @@ static bool free_pages_prepare(struct page *page, unsigned int order)
106948 bool compound = PageCompound(page);
106949 int i, bad = 0;
106950
106951+#ifdef CONFIG_PAX_MEMORY_SANITIZE
106952+ unsigned long index = 1UL << order;
106953+#endif
106954+
106955 VM_BUG_ON_PAGE(PageTail(page), page);
106956 VM_BUG_ON_PAGE(compound && compound_order(page) != order, page);
106957
106958@@ -809,6 +814,12 @@ static bool free_pages_prepare(struct page *page, unsigned int order)
106959 debug_check_no_obj_freed(page_address(page),
106960 PAGE_SIZE << order);
106961 }
106962+
106963+#ifdef CONFIG_PAX_MEMORY_SANITIZE
106964+ for (; index; --index)
106965+ sanitize_highpage(page + index - 1);
106966+#endif
106967+
106968 arch_free_page(page, order);
106969 kernel_map_pages(page, 1 << order, 0);
106970
106971@@ -832,6 +843,20 @@ static void __free_pages_ok(struct page *page, unsigned int order)
106972 local_irq_restore(flags);
106973 }
106974
106975+#ifdef CONFIG_PAX_LATENT_ENTROPY
106976+bool __meminitdata extra_latent_entropy;
106977+
106978+static int __init setup_pax_extra_latent_entropy(char *str)
106979+{
106980+ extra_latent_entropy = true;
106981+ return 0;
106982+}
106983+early_param("pax_extra_latent_entropy", setup_pax_extra_latent_entropy);
106984+
106985+volatile u64 latent_entropy __latent_entropy;
106986+EXPORT_SYMBOL(latent_entropy);
106987+#endif
106988+
106989 void __init __free_pages_bootmem(struct page *page, unsigned int order)
106990 {
106991 unsigned int nr_pages = 1 << order;
106992@@ -847,6 +872,19 @@ void __init __free_pages_bootmem(struct page *page, unsigned int order)
106993 __ClearPageReserved(p);
106994 set_page_count(p, 0);
106995
106996+#ifdef CONFIG_PAX_LATENT_ENTROPY
106997+ if (extra_latent_entropy && !PageHighMem(page) && page_to_pfn(page) < 0x100000) {
106998+ u64 hash = 0;
106999+ size_t index, end = PAGE_SIZE * nr_pages / sizeof hash;
107000+ const u64 *data = lowmem_page_address(page);
107001+
107002+ for (index = 0; index < end; index++)
107003+ hash ^= hash + data[index];
107004+ latent_entropy ^= hash;
107005+ add_device_randomness((const void *)&latent_entropy, sizeof(latent_entropy));
107006+ }
107007+#endif
107008+
107009 page_zone(page)->managed_pages += nr_pages;
107010 set_page_refcounted(page);
107011 __free_pages(page, order);
107012@@ -974,8 +1012,10 @@ static int prep_new_page(struct page *page, unsigned int order, gfp_t gfp_flags,
107013 kernel_map_pages(page, 1 << order, 1);
107014 kasan_alloc_pages(page, order);
107015
107016+#ifndef CONFIG_PAX_MEMORY_SANITIZE
107017 if (gfp_flags & __GFP_ZERO)
107018 prep_zero_page(page, order, gfp_flags);
107019+#endif
107020
107021 if (order && (gfp_flags & __GFP_COMP))
107022 prep_compound_page(page, order);
107023@@ -1699,7 +1739,7 @@ struct page *buffered_rmqueue(struct zone *preferred_zone,
107024 }
107025
107026 __mod_zone_page_state(zone, NR_ALLOC_BATCH, -(1 << order));
107027- if (atomic_long_read(&zone->vm_stat[NR_ALLOC_BATCH]) <= 0 &&
107028+ if (atomic_long_read_unchecked(&zone->vm_stat[NR_ALLOC_BATCH]) <= 0 &&
107029 !test_bit(ZONE_FAIR_DEPLETED, &zone->flags))
107030 set_bit(ZONE_FAIR_DEPLETED, &zone->flags);
107031
107032@@ -2018,7 +2058,7 @@ static void reset_alloc_batches(struct zone *preferred_zone)
107033 do {
107034 mod_zone_page_state(zone, NR_ALLOC_BATCH,
107035 high_wmark_pages(zone) - low_wmark_pages(zone) -
107036- atomic_long_read(&zone->vm_stat[NR_ALLOC_BATCH]));
107037+ atomic_long_read_unchecked(&zone->vm_stat[NR_ALLOC_BATCH]));
107038 clear_bit(ZONE_FAIR_DEPLETED, &zone->flags);
107039 } while (zone++ != preferred_zone);
107040 }
107041@@ -5738,7 +5778,7 @@ static void __setup_per_zone_wmarks(void)
107042
107043 __mod_zone_page_state(zone, NR_ALLOC_BATCH,
107044 high_wmark_pages(zone) - low_wmark_pages(zone) -
107045- atomic_long_read(&zone->vm_stat[NR_ALLOC_BATCH]));
107046+ atomic_long_read_unchecked(&zone->vm_stat[NR_ALLOC_BATCH]));
107047
107048 setup_zone_migrate_reserve(zone);
107049 spin_unlock_irqrestore(&zone->lock, flags);
107050diff --git a/mm/percpu.c b/mm/percpu.c
107051index 73c97a5..508ee25 100644
107052--- a/mm/percpu.c
107053+++ b/mm/percpu.c
107054@@ -131,7 +131,7 @@ static unsigned int pcpu_low_unit_cpu __read_mostly;
107055 static unsigned int pcpu_high_unit_cpu __read_mostly;
107056
107057 /* the address of the first chunk which starts with the kernel static area */
107058-void *pcpu_base_addr __read_mostly;
107059+void *pcpu_base_addr __read_only;
107060 EXPORT_SYMBOL_GPL(pcpu_base_addr);
107061
107062 static const int *pcpu_unit_map __read_mostly; /* cpu -> unit */
107063diff --git a/mm/process_vm_access.c b/mm/process_vm_access.c
107064index b159769..d07037f 100644
107065--- a/mm/process_vm_access.c
107066+++ b/mm/process_vm_access.c
107067@@ -13,6 +13,7 @@
107068 #include <linux/uio.h>
107069 #include <linux/sched.h>
107070 #include <linux/highmem.h>
107071+#include <linux/security.h>
107072 #include <linux/ptrace.h>
107073 #include <linux/slab.h>
107074 #include <linux/syscalls.h>
107075@@ -154,19 +155,19 @@ static ssize_t process_vm_rw_core(pid_t pid, struct iov_iter *iter,
107076 ssize_t iov_len;
107077 size_t total_len = iov_iter_count(iter);
107078
107079+ return -ENOSYS; // PaX: until properly audited
107080+
107081 /*
107082 * Work out how many pages of struct pages we're going to need
107083 * when eventually calling get_user_pages
107084 */
107085 for (i = 0; i < riovcnt; i++) {
107086 iov_len = rvec[i].iov_len;
107087- if (iov_len > 0) {
107088- nr_pages_iov = ((unsigned long)rvec[i].iov_base
107089- + iov_len)
107090- / PAGE_SIZE - (unsigned long)rvec[i].iov_base
107091- / PAGE_SIZE + 1;
107092- nr_pages = max(nr_pages, nr_pages_iov);
107093- }
107094+ if (iov_len <= 0)
107095+ continue;
107096+ nr_pages_iov = ((unsigned long)rvec[i].iov_base + iov_len) / PAGE_SIZE -
107097+ (unsigned long)rvec[i].iov_base / PAGE_SIZE + 1;
107098+ nr_pages = max(nr_pages, nr_pages_iov);
107099 }
107100
107101 if (nr_pages == 0)
107102@@ -194,6 +195,11 @@ static ssize_t process_vm_rw_core(pid_t pid, struct iov_iter *iter,
107103 goto free_proc_pages;
107104 }
107105
107106+ if (gr_handle_ptrace(task, vm_write ? PTRACE_POKETEXT : PTRACE_ATTACH)) {
107107+ rc = -EPERM;
107108+ goto put_task_struct;
107109+ }
107110+
107111 mm = mm_access(task, PTRACE_MODE_ATTACH);
107112 if (!mm || IS_ERR(mm)) {
107113 rc = IS_ERR(mm) ? PTR_ERR(mm) : -ESRCH;
107114diff --git a/mm/rmap.c b/mm/rmap.c
107115index c161a14..8a069bb 100644
107116--- a/mm/rmap.c
107117+++ b/mm/rmap.c
107118@@ -166,6 +166,10 @@ int anon_vma_prepare(struct vm_area_struct *vma)
107119 struct anon_vma *anon_vma = vma->anon_vma;
107120 struct anon_vma_chain *avc;
107121
107122+#ifdef CONFIG_PAX_SEGMEXEC
107123+ struct anon_vma_chain *avc_m = NULL;
107124+#endif
107125+
107126 might_sleep();
107127 if (unlikely(!anon_vma)) {
107128 struct mm_struct *mm = vma->vm_mm;
107129@@ -175,6 +179,12 @@ int anon_vma_prepare(struct vm_area_struct *vma)
107130 if (!avc)
107131 goto out_enomem;
107132
107133+#ifdef CONFIG_PAX_SEGMEXEC
107134+ avc_m = anon_vma_chain_alloc(GFP_KERNEL);
107135+ if (!avc_m)
107136+ goto out_enomem_free_avc;
107137+#endif
107138+
107139 anon_vma = find_mergeable_anon_vma(vma);
107140 allocated = NULL;
107141 if (!anon_vma) {
107142@@ -188,6 +198,19 @@ int anon_vma_prepare(struct vm_area_struct *vma)
107143 /* page_table_lock to protect against threads */
107144 spin_lock(&mm->page_table_lock);
107145 if (likely(!vma->anon_vma)) {
107146+
107147+#ifdef CONFIG_PAX_SEGMEXEC
107148+ struct vm_area_struct *vma_m = pax_find_mirror_vma(vma);
107149+
107150+ if (vma_m) {
107151+ BUG_ON(vma_m->anon_vma);
107152+ vma_m->anon_vma = anon_vma;
107153+ anon_vma_chain_link(vma_m, avc_m, anon_vma);
107154+ anon_vma->degree++;
107155+ avc_m = NULL;
107156+ }
107157+#endif
107158+
107159 vma->anon_vma = anon_vma;
107160 anon_vma_chain_link(vma, avc, anon_vma);
107161 /* vma reference or self-parent link for new root */
107162@@ -200,12 +223,24 @@ int anon_vma_prepare(struct vm_area_struct *vma)
107163
107164 if (unlikely(allocated))
107165 put_anon_vma(allocated);
107166+
107167+#ifdef CONFIG_PAX_SEGMEXEC
107168+ if (unlikely(avc_m))
107169+ anon_vma_chain_free(avc_m);
107170+#endif
107171+
107172 if (unlikely(avc))
107173 anon_vma_chain_free(avc);
107174 }
107175 return 0;
107176
107177 out_enomem_free_avc:
107178+
107179+#ifdef CONFIG_PAX_SEGMEXEC
107180+ if (avc_m)
107181+ anon_vma_chain_free(avc_m);
107182+#endif
107183+
107184 anon_vma_chain_free(avc);
107185 out_enomem:
107186 return -ENOMEM;
107187@@ -249,7 +284,7 @@ static inline void unlock_anon_vma_root(struct anon_vma *root)
107188 * good chance of avoiding scanning the whole hierarchy when it searches where
107189 * page is mapped.
107190 */
107191-int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
107192+int anon_vma_clone(struct vm_area_struct *dst, const struct vm_area_struct *src)
107193 {
107194 struct anon_vma_chain *avc, *pavc;
107195 struct anon_vma *root = NULL;
107196@@ -303,7 +338,7 @@ int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
107197 * the corresponding VMA in the parent process is attached to.
107198 * Returns 0 on success, non-zero on failure.
107199 */
107200-int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma)
107201+int anon_vma_fork(struct vm_area_struct *vma, const struct vm_area_struct *pvma)
107202 {
107203 struct anon_vma_chain *avc;
107204 struct anon_vma *anon_vma;
107205@@ -423,8 +458,10 @@ static void anon_vma_ctor(void *data)
107206 void __init anon_vma_init(void)
107207 {
107208 anon_vma_cachep = kmem_cache_create("anon_vma", sizeof(struct anon_vma),
107209- 0, SLAB_DESTROY_BY_RCU|SLAB_PANIC, anon_vma_ctor);
107210- anon_vma_chain_cachep = KMEM_CACHE(anon_vma_chain, SLAB_PANIC);
107211+ 0, SLAB_DESTROY_BY_RCU|SLAB_PANIC|SLAB_NO_SANITIZE,
107212+ anon_vma_ctor);
107213+ anon_vma_chain_cachep = KMEM_CACHE(anon_vma_chain,
107214+ SLAB_PANIC|SLAB_NO_SANITIZE);
107215 }
107216
107217 /*
107218diff --git a/mm/shmem.c b/mm/shmem.c
107219index cf2d0ca..ec06b8b 100644
107220--- a/mm/shmem.c
107221+++ b/mm/shmem.c
107222@@ -33,7 +33,7 @@
107223 #include <linux/swap.h>
107224 #include <linux/aio.h>
107225
107226-static struct vfsmount *shm_mnt;
107227+struct vfsmount *shm_mnt;
107228
107229 #ifdef CONFIG_SHMEM
107230 /*
107231@@ -80,7 +80,7 @@ static struct vfsmount *shm_mnt;
107232 #define BOGO_DIRENT_SIZE 20
107233
107234 /* Symlink up to this size is kmalloc'ed instead of using a swappable page */
107235-#define SHORT_SYMLINK_LEN 128
107236+#define SHORT_SYMLINK_LEN 64
107237
107238 /*
107239 * shmem_fallocate communicates with shmem_fault or shmem_writepage via
107240@@ -2555,6 +2555,11 @@ static const struct xattr_handler *shmem_xattr_handlers[] = {
107241 static int shmem_xattr_validate(const char *name)
107242 {
107243 struct { const char *prefix; size_t len; } arr[] = {
107244+
107245+#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
107246+ { XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN},
107247+#endif
107248+
107249 { XATTR_SECURITY_PREFIX, XATTR_SECURITY_PREFIX_LEN },
107250 { XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN }
107251 };
107252@@ -2610,6 +2615,15 @@ static int shmem_setxattr(struct dentry *dentry, const char *name,
107253 if (err)
107254 return err;
107255
107256+#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
107257+ if (!strncmp(name, XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN)) {
107258+ if (strcmp(name, XATTR_NAME_PAX_FLAGS))
107259+ return -EOPNOTSUPP;
107260+ if (size > 8)
107261+ return -EINVAL;
107262+ }
107263+#endif
107264+
107265 return simple_xattr_set(&info->xattrs, name, value, size, flags);
107266 }
107267
107268@@ -2993,8 +3007,7 @@ int shmem_fill_super(struct super_block *sb, void *data, int silent)
107269 int err = -ENOMEM;
107270
107271 /* Round up to L1_CACHE_BYTES to resist false sharing */
107272- sbinfo = kzalloc(max((int)sizeof(struct shmem_sb_info),
107273- L1_CACHE_BYTES), GFP_KERNEL);
107274+ sbinfo = kzalloc(max(sizeof(struct shmem_sb_info), L1_CACHE_BYTES), GFP_KERNEL);
107275 if (!sbinfo)
107276 return -ENOMEM;
107277
107278diff --git a/mm/slab.c b/mm/slab.c
107279index c4b89ea..20990be 100644
107280--- a/mm/slab.c
107281+++ b/mm/slab.c
107282@@ -314,10 +314,12 @@ static void kmem_cache_node_init(struct kmem_cache_node *parent)
107283 if ((x)->max_freeable < i) \
107284 (x)->max_freeable = i; \
107285 } while (0)
107286-#define STATS_INC_ALLOCHIT(x) atomic_inc(&(x)->allochit)
107287-#define STATS_INC_ALLOCMISS(x) atomic_inc(&(x)->allocmiss)
107288-#define STATS_INC_FREEHIT(x) atomic_inc(&(x)->freehit)
107289-#define STATS_INC_FREEMISS(x) atomic_inc(&(x)->freemiss)
107290+#define STATS_INC_ALLOCHIT(x) atomic_inc_unchecked(&(x)->allochit)
107291+#define STATS_INC_ALLOCMISS(x) atomic_inc_unchecked(&(x)->allocmiss)
107292+#define STATS_INC_FREEHIT(x) atomic_inc_unchecked(&(x)->freehit)
107293+#define STATS_INC_FREEMISS(x) atomic_inc_unchecked(&(x)->freemiss)
107294+#define STATS_INC_SANITIZED(x) atomic_inc_unchecked(&(x)->sanitized)
107295+#define STATS_INC_NOT_SANITIZED(x) atomic_inc_unchecked(&(x)->not_sanitized)
107296 #else
107297 #define STATS_INC_ACTIVE(x) do { } while (0)
107298 #define STATS_DEC_ACTIVE(x) do { } while (0)
107299@@ -334,6 +336,8 @@ static void kmem_cache_node_init(struct kmem_cache_node *parent)
107300 #define STATS_INC_ALLOCMISS(x) do { } while (0)
107301 #define STATS_INC_FREEHIT(x) do { } while (0)
107302 #define STATS_INC_FREEMISS(x) do { } while (0)
107303+#define STATS_INC_SANITIZED(x) do { } while (0)
107304+#define STATS_INC_NOT_SANITIZED(x) do { } while (0)
107305 #endif
107306
107307 #if DEBUG
107308@@ -450,7 +454,7 @@ static inline void *index_to_obj(struct kmem_cache *cache, struct page *page,
107309 * reciprocal_divide(offset, cache->reciprocal_buffer_size)
107310 */
107311 static inline unsigned int obj_to_index(const struct kmem_cache *cache,
107312- const struct page *page, void *obj)
107313+ const struct page *page, const void *obj)
107314 {
107315 u32 offset = (obj - page->s_mem);
107316 return reciprocal_divide(offset, cache->reciprocal_buffer_size);
107317@@ -1438,7 +1442,7 @@ void __init kmem_cache_init(void)
107318 * structures first. Without this, further allocations will bug.
107319 */
107320 kmalloc_caches[INDEX_NODE] = create_kmalloc_cache("kmalloc-node",
107321- kmalloc_size(INDEX_NODE), ARCH_KMALLOC_FLAGS);
107322+ kmalloc_size(INDEX_NODE), SLAB_USERCOPY | ARCH_KMALLOC_FLAGS);
107323 slab_state = PARTIAL_NODE;
107324
107325 slab_early_init = 0;
107326@@ -2059,7 +2063,7 @@ __kmem_cache_alias(const char *name, size_t size, size_t align,
107327
107328 cachep = find_mergeable(size, align, flags, name, ctor);
107329 if (cachep) {
107330- cachep->refcount++;
107331+ atomic_inc(&cachep->refcount);
107332
107333 /*
107334 * Adjust the object sizes so that we clear
107335@@ -3357,6 +3361,20 @@ static inline void __cache_free(struct kmem_cache *cachep, void *objp,
107336 struct array_cache *ac = cpu_cache_get(cachep);
107337
107338 check_irq_off();
107339+
107340+#ifdef CONFIG_PAX_MEMORY_SANITIZE
107341+ if (cachep->flags & (SLAB_POISON | SLAB_NO_SANITIZE))
107342+ STATS_INC_NOT_SANITIZED(cachep);
107343+ else {
107344+ memset(objp, PAX_MEMORY_SANITIZE_VALUE, cachep->object_size);
107345+
107346+ if (cachep->ctor)
107347+ cachep->ctor(objp);
107348+
107349+ STATS_INC_SANITIZED(cachep);
107350+ }
107351+#endif
107352+
107353 kmemleak_free_recursive(objp, cachep->flags);
107354 objp = cache_free_debugcheck(cachep, objp, caller);
107355
107356@@ -3469,7 +3487,7 @@ __do_kmalloc_node(size_t size, gfp_t flags, int node, unsigned long caller)
107357 return kmem_cache_alloc_node_trace(cachep, flags, node, size);
107358 }
107359
107360-void *__kmalloc_node(size_t size, gfp_t flags, int node)
107361+void * __size_overflow(1) __kmalloc_node(size_t size, gfp_t flags, int node)
107362 {
107363 return __do_kmalloc_node(size, flags, node, _RET_IP_);
107364 }
107365@@ -3489,7 +3507,7 @@ EXPORT_SYMBOL(__kmalloc_node_track_caller);
107366 * @flags: the type of memory to allocate (see kmalloc).
107367 * @caller: function caller for debug tracking of the caller
107368 */
107369-static __always_inline void *__do_kmalloc(size_t size, gfp_t flags,
107370+static __always_inline void * __size_overflow(1) __do_kmalloc(size_t size, gfp_t flags,
107371 unsigned long caller)
107372 {
107373 struct kmem_cache *cachep;
107374@@ -3562,6 +3580,7 @@ void kfree(const void *objp)
107375
107376 if (unlikely(ZERO_OR_NULL_PTR(objp)))
107377 return;
107378+ VM_BUG_ON(!virt_addr_valid(objp));
107379 local_irq_save(flags);
107380 kfree_debugcheck(objp);
107381 c = virt_to_cache(objp);
107382@@ -3981,14 +4000,22 @@ void slabinfo_show_stats(struct seq_file *m, struct kmem_cache *cachep)
107383 }
107384 /* cpu stats */
107385 {
107386- unsigned long allochit = atomic_read(&cachep->allochit);
107387- unsigned long allocmiss = atomic_read(&cachep->allocmiss);
107388- unsigned long freehit = atomic_read(&cachep->freehit);
107389- unsigned long freemiss = atomic_read(&cachep->freemiss);
107390+ unsigned long allochit = atomic_read_unchecked(&cachep->allochit);
107391+ unsigned long allocmiss = atomic_read_unchecked(&cachep->allocmiss);
107392+ unsigned long freehit = atomic_read_unchecked(&cachep->freehit);
107393+ unsigned long freemiss = atomic_read_unchecked(&cachep->freemiss);
107394
107395 seq_printf(m, " : cpustat %6lu %6lu %6lu %6lu",
107396 allochit, allocmiss, freehit, freemiss);
107397 }
107398+#ifdef CONFIG_PAX_MEMORY_SANITIZE
107399+ {
107400+ unsigned long sanitized = atomic_read_unchecked(&cachep->sanitized);
107401+ unsigned long not_sanitized = atomic_read_unchecked(&cachep->not_sanitized);
107402+
107403+ seq_printf(m, " : pax %6lu %6lu", sanitized, not_sanitized);
107404+ }
107405+#endif
107406 #endif
107407 }
107408
107409@@ -4196,13 +4223,69 @@ static const struct file_operations proc_slabstats_operations = {
107410 static int __init slab_proc_init(void)
107411 {
107412 #ifdef CONFIG_DEBUG_SLAB_LEAK
107413- proc_create("slab_allocators", 0, NULL, &proc_slabstats_operations);
107414+ proc_create("slab_allocators", S_IRUSR, NULL, &proc_slabstats_operations);
107415 #endif
107416 return 0;
107417 }
107418 module_init(slab_proc_init);
107419 #endif
107420
107421+bool is_usercopy_object(const void *ptr)
107422+{
107423+ struct page *page;
107424+ struct kmem_cache *cachep;
107425+
107426+ if (ZERO_OR_NULL_PTR(ptr))
107427+ return false;
107428+
107429+ if (!slab_is_available())
107430+ return false;
107431+
107432+ if (!virt_addr_valid(ptr))
107433+ return false;
107434+
107435+ page = virt_to_head_page(ptr);
107436+
107437+ if (!PageSlab(page))
107438+ return false;
107439+
107440+ cachep = page->slab_cache;
107441+ return cachep->flags & SLAB_USERCOPY;
107442+}
107443+
107444+#ifdef CONFIG_PAX_USERCOPY
107445+const char *check_heap_object(const void *ptr, unsigned long n)
107446+{
107447+ struct page *page;
107448+ struct kmem_cache *cachep;
107449+ unsigned int objnr;
107450+ unsigned long offset;
107451+
107452+ if (ZERO_OR_NULL_PTR(ptr))
107453+ return "<null>";
107454+
107455+ if (!virt_addr_valid(ptr))
107456+ return NULL;
107457+
107458+ page = virt_to_head_page(ptr);
107459+
107460+ if (!PageSlab(page))
107461+ return NULL;
107462+
107463+ cachep = page->slab_cache;
107464+ if (!(cachep->flags & SLAB_USERCOPY))
107465+ return cachep->name;
107466+
107467+ objnr = obj_to_index(cachep, page, ptr);
107468+ BUG_ON(objnr >= cachep->num);
107469+ offset = ptr - index_to_obj(cachep, page, objnr) - obj_offset(cachep);
107470+ if (offset <= cachep->object_size && n <= cachep->object_size - offset)
107471+ return NULL;
107472+
107473+ return cachep->name;
107474+}
107475+#endif
107476+
107477 /**
107478 * ksize - get the actual amount of memory allocated for a given object
107479 * @objp: Pointer to the object
107480diff --git a/mm/slab.h b/mm/slab.h
107481index 4c3ac12..7b2e470 100644
107482--- a/mm/slab.h
107483+++ b/mm/slab.h
107484@@ -22,7 +22,7 @@ struct kmem_cache {
107485 unsigned int align; /* Alignment as calculated */
107486 unsigned long flags; /* Active flags on the slab */
107487 const char *name; /* Slab name for sysfs */
107488- int refcount; /* Use counter */
107489+ atomic_t refcount; /* Use counter */
107490 void (*ctor)(void *); /* Called on object slot creation */
107491 struct list_head list; /* List of all slab caches on the system */
107492 };
107493@@ -66,6 +66,20 @@ extern struct list_head slab_caches;
107494 /* The slab cache that manages slab cache information */
107495 extern struct kmem_cache *kmem_cache;
107496
107497+#ifdef CONFIG_PAX_MEMORY_SANITIZE
107498+#ifdef CONFIG_X86_64
107499+#define PAX_MEMORY_SANITIZE_VALUE '\xfe'
107500+#else
107501+#define PAX_MEMORY_SANITIZE_VALUE '\xff'
107502+#endif
107503+enum pax_sanitize_mode {
107504+ PAX_SANITIZE_SLAB_OFF = 0,
107505+ PAX_SANITIZE_SLAB_FAST,
107506+ PAX_SANITIZE_SLAB_FULL,
107507+};
107508+extern enum pax_sanitize_mode pax_sanitize_slab;
107509+#endif
107510+
107511 unsigned long calculate_alignment(unsigned long flags,
107512 unsigned long align, unsigned long size);
107513
107514@@ -114,7 +128,8 @@ static inline unsigned long kmem_cache_flags(unsigned long object_size,
107515
107516 /* Legal flag mask for kmem_cache_create(), for various configurations */
107517 #define SLAB_CORE_FLAGS (SLAB_HWCACHE_ALIGN | SLAB_CACHE_DMA | SLAB_PANIC | \
107518- SLAB_DESTROY_BY_RCU | SLAB_DEBUG_OBJECTS )
107519+ SLAB_DESTROY_BY_RCU | SLAB_DEBUG_OBJECTS | \
107520+ SLAB_USERCOPY | SLAB_NO_SANITIZE)
107521
107522 #if defined(CONFIG_DEBUG_SLAB)
107523 #define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER)
107524@@ -315,6 +330,9 @@ static inline struct kmem_cache *cache_from_obj(struct kmem_cache *s, void *x)
107525 return s;
107526
107527 page = virt_to_head_page(x);
107528+
107529+ BUG_ON(!PageSlab(page));
107530+
107531 cachep = page->slab_cache;
107532 if (slab_equal_or_root(cachep, s))
107533 return cachep;
107534diff --git a/mm/slab_common.c b/mm/slab_common.c
107535index 999bb34..9843aea 100644
107536--- a/mm/slab_common.c
107537+++ b/mm/slab_common.c
107538@@ -25,11 +25,35 @@
107539
107540 #include "slab.h"
107541
107542-enum slab_state slab_state;
107543+enum slab_state slab_state __read_only;
107544 LIST_HEAD(slab_caches);
107545 DEFINE_MUTEX(slab_mutex);
107546 struct kmem_cache *kmem_cache;
107547
107548+#ifdef CONFIG_PAX_MEMORY_SANITIZE
107549+enum pax_sanitize_mode pax_sanitize_slab __read_only = PAX_SANITIZE_SLAB_FAST;
107550+static int __init pax_sanitize_slab_setup(char *str)
107551+{
107552+ if (!str)
107553+ return 0;
107554+
107555+ if (!strcmp(str, "0") || !strcmp(str, "off")) {
107556+ pr_info("PaX slab sanitization: %s\n", "disabled");
107557+ pax_sanitize_slab = PAX_SANITIZE_SLAB_OFF;
107558+ } else if (!strcmp(str, "1") || !strcmp(str, "fast")) {
107559+ pr_info("PaX slab sanitization: %s\n", "fast");
107560+ pax_sanitize_slab = PAX_SANITIZE_SLAB_FAST;
107561+ } else if (!strcmp(str, "full")) {
107562+ pr_info("PaX slab sanitization: %s\n", "full");
107563+ pax_sanitize_slab = PAX_SANITIZE_SLAB_FULL;
107564+ } else
107565+ pr_err("PaX slab sanitization: unsupported option '%s'\n", str);
107566+
107567+ return 0;
107568+}
107569+early_param("pax_sanitize_slab", pax_sanitize_slab_setup);
107570+#endif
107571+
107572 /*
107573 * Set of flags that will prevent slab merging
107574 */
107575@@ -44,7 +68,7 @@ struct kmem_cache *kmem_cache;
107576 * Merge control. If this is set then no merging of slab caches will occur.
107577 * (Could be removed. This was introduced to pacify the merge skeptics.)
107578 */
107579-static int slab_nomerge;
107580+static int slab_nomerge = 1;
107581
107582 static int __init setup_slab_nomerge(char *str)
107583 {
107584@@ -217,7 +241,7 @@ int slab_unmergeable(struct kmem_cache *s)
107585 /*
107586 * We may have set a slab to be unmergeable during bootstrap.
107587 */
107588- if (s->refcount < 0)
107589+ if (atomic_read(&s->refcount) < 0)
107590 return 1;
107591
107592 return 0;
107593@@ -321,7 +345,7 @@ do_kmem_cache_create(const char *name, size_t object_size, size_t size,
107594 if (err)
107595 goto out_free_cache;
107596
107597- s->refcount = 1;
107598+ atomic_set(&s->refcount, 1);
107599 list_add(&s->list, &slab_caches);
107600 out:
107601 if (err)
107602@@ -386,6 +410,13 @@ kmem_cache_create(const char *name, size_t size, size_t align,
107603 */
107604 flags &= CACHE_CREATE_MASK;
107605
107606+#ifdef CONFIG_PAX_MEMORY_SANITIZE
107607+ if (pax_sanitize_slab == PAX_SANITIZE_SLAB_OFF || (flags & SLAB_DESTROY_BY_RCU))
107608+ flags |= SLAB_NO_SANITIZE;
107609+ else if (pax_sanitize_slab == PAX_SANITIZE_SLAB_FULL)
107610+ flags &= ~SLAB_NO_SANITIZE;
107611+#endif
107612+
107613 s = __kmem_cache_alias(name, size, align, flags, ctor);
107614 if (s)
107615 goto out_unlock;
107616@@ -456,7 +487,7 @@ static void do_kmem_cache_release(struct list_head *release,
107617 rcu_barrier();
107618
107619 list_for_each_entry_safe(s, s2, release, list) {
107620-#ifdef SLAB_SUPPORTS_SYSFS
107621+#if defined(SLAB_SUPPORTS_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
107622 sysfs_slab_remove(s);
107623 #else
107624 slab_kmem_cache_release(s);
107625@@ -625,8 +656,7 @@ void kmem_cache_destroy(struct kmem_cache *s)
107626
107627 mutex_lock(&slab_mutex);
107628
107629- s->refcount--;
107630- if (s->refcount)
107631+ if (!atomic_dec_and_test(&s->refcount))
107632 goto out_unlock;
107633
107634 for_each_memcg_cache_safe(c, c2, s) {
107635@@ -691,7 +721,7 @@ void __init create_boot_cache(struct kmem_cache *s, const char *name, size_t siz
107636 panic("Creation of kmalloc slab %s size=%zu failed. Reason %d\n",
107637 name, size, err);
107638
107639- s->refcount = -1; /* Exempt from merging for now */
107640+ atomic_set(&s->refcount, -1); /* Exempt from merging for now */
107641 }
107642
107643 struct kmem_cache *__init create_kmalloc_cache(const char *name, size_t size,
107644@@ -704,7 +734,7 @@ struct kmem_cache *__init create_kmalloc_cache(const char *name, size_t size,
107645
107646 create_boot_cache(s, name, size, flags);
107647 list_add(&s->list, &slab_caches);
107648- s->refcount = 1;
107649+ atomic_set(&s->refcount, 1);
107650 return s;
107651 }
107652
107653@@ -716,6 +746,11 @@ struct kmem_cache *kmalloc_dma_caches[KMALLOC_SHIFT_HIGH + 1];
107654 EXPORT_SYMBOL(kmalloc_dma_caches);
107655 #endif
107656
107657+#ifdef CONFIG_PAX_USERCOPY_SLABS
107658+struct kmem_cache *kmalloc_usercopy_caches[KMALLOC_SHIFT_HIGH + 1];
107659+EXPORT_SYMBOL(kmalloc_usercopy_caches);
107660+#endif
107661+
107662 /*
107663 * Conversion table for small slabs sizes / 8 to the index in the
107664 * kmalloc array. This is necessary for slabs < 192 since we have non power
107665@@ -780,6 +815,13 @@ struct kmem_cache *kmalloc_slab(size_t size, gfp_t flags)
107666 return kmalloc_dma_caches[index];
107667
107668 #endif
107669+
107670+#ifdef CONFIG_PAX_USERCOPY_SLABS
107671+ if (unlikely((flags & GFP_USERCOPY)))
107672+ return kmalloc_usercopy_caches[index];
107673+
107674+#endif
107675+
107676 return kmalloc_caches[index];
107677 }
107678
107679@@ -836,7 +878,7 @@ void __init create_kmalloc_caches(unsigned long flags)
107680 for (i = KMALLOC_SHIFT_LOW; i <= KMALLOC_SHIFT_HIGH; i++) {
107681 if (!kmalloc_caches[i]) {
107682 kmalloc_caches[i] = create_kmalloc_cache(NULL,
107683- 1 << i, flags);
107684+ 1 << i, SLAB_USERCOPY | flags);
107685 }
107686
107687 /*
107688@@ -845,10 +887,10 @@ void __init create_kmalloc_caches(unsigned long flags)
107689 * earlier power of two caches
107690 */
107691 if (KMALLOC_MIN_SIZE <= 32 && !kmalloc_caches[1] && i == 6)
107692- kmalloc_caches[1] = create_kmalloc_cache(NULL, 96, flags);
107693+ kmalloc_caches[1] = create_kmalloc_cache(NULL, 96, SLAB_USERCOPY | flags);
107694
107695 if (KMALLOC_MIN_SIZE <= 64 && !kmalloc_caches[2] && i == 7)
107696- kmalloc_caches[2] = create_kmalloc_cache(NULL, 192, flags);
107697+ kmalloc_caches[2] = create_kmalloc_cache(NULL, 192, SLAB_USERCOPY | flags);
107698 }
107699
107700 /* Kmalloc array is now usable */
107701@@ -881,6 +923,23 @@ void __init create_kmalloc_caches(unsigned long flags)
107702 }
107703 }
107704 #endif
107705+
107706+#ifdef CONFIG_PAX_USERCOPY_SLABS
107707+ for (i = 0; i <= KMALLOC_SHIFT_HIGH; i++) {
107708+ struct kmem_cache *s = kmalloc_caches[i];
107709+
107710+ if (s) {
107711+ int size = kmalloc_size(i);
107712+ char *n = kasprintf(GFP_NOWAIT,
107713+ "usercopy-kmalloc-%d", size);
107714+
107715+ BUG_ON(!n);
107716+ kmalloc_usercopy_caches[i] = create_kmalloc_cache(n,
107717+ size, SLAB_USERCOPY | flags);
107718+ }
107719+ }
107720+#endif
107721+
107722 }
107723 #endif /* !CONFIG_SLOB */
107724
107725@@ -940,6 +999,9 @@ static void print_slabinfo_header(struct seq_file *m)
107726 seq_puts(m, " : globalstat <listallocs> <maxobjs> <grown> <reaped> "
107727 "<error> <maxfreeable> <nodeallocs> <remotefrees> <alienoverflow>");
107728 seq_puts(m, " : cpustat <allochit> <allocmiss> <freehit> <freemiss>");
107729+#ifdef CONFIG_PAX_MEMORY_SANITIZE
107730+ seq_puts(m, " : pax <sanitized> <not_sanitized>");
107731+#endif
107732 #endif
107733 seq_putc(m, '\n');
107734 }
107735@@ -1069,7 +1131,7 @@ static int __init slab_proc_init(void)
107736 module_init(slab_proc_init);
107737 #endif /* CONFIG_SLABINFO */
107738
107739-static __always_inline void *__do_krealloc(const void *p, size_t new_size,
107740+static __always_inline void * __size_overflow(2) __do_krealloc(const void *p, size_t new_size,
107741 gfp_t flags)
107742 {
107743 void *ret;
107744diff --git a/mm/slob.c b/mm/slob.c
107745index 94a7fed..cf3fb1a 100644
107746--- a/mm/slob.c
107747+++ b/mm/slob.c
107748@@ -157,7 +157,7 @@ static void set_slob(slob_t *s, slobidx_t size, slob_t *next)
107749 /*
107750 * Return the size of a slob block.
107751 */
107752-static slobidx_t slob_units(slob_t *s)
107753+static slobidx_t slob_units(const slob_t *s)
107754 {
107755 if (s->units > 0)
107756 return s->units;
107757@@ -167,7 +167,7 @@ static slobidx_t slob_units(slob_t *s)
107758 /*
107759 * Return the next free slob block pointer after this one.
107760 */
107761-static slob_t *slob_next(slob_t *s)
107762+static slob_t *slob_next(const slob_t *s)
107763 {
107764 slob_t *base = (slob_t *)((unsigned long)s & PAGE_MASK);
107765 slobidx_t next;
107766@@ -182,14 +182,14 @@ static slob_t *slob_next(slob_t *s)
107767 /*
107768 * Returns true if s is the last free block in its page.
107769 */
107770-static int slob_last(slob_t *s)
107771+static int slob_last(const slob_t *s)
107772 {
107773 return !((unsigned long)slob_next(s) & ~PAGE_MASK);
107774 }
107775
107776-static void *slob_new_pages(gfp_t gfp, int order, int node)
107777+static struct page *slob_new_pages(gfp_t gfp, unsigned int order, int node)
107778 {
107779- void *page;
107780+ struct page *page;
107781
107782 #ifdef CONFIG_NUMA
107783 if (node != NUMA_NO_NODE)
107784@@ -201,14 +201,18 @@ static void *slob_new_pages(gfp_t gfp, int order, int node)
107785 if (!page)
107786 return NULL;
107787
107788- return page_address(page);
107789+ __SetPageSlab(page);
107790+ return page;
107791 }
107792
107793-static void slob_free_pages(void *b, int order)
107794+static void slob_free_pages(struct page *sp, int order)
107795 {
107796 if (current->reclaim_state)
107797 current->reclaim_state->reclaimed_slab += 1 << order;
107798- free_pages((unsigned long)b, order);
107799+ __ClearPageSlab(sp);
107800+ page_mapcount_reset(sp);
107801+ sp->private = 0;
107802+ __free_pages(sp, order);
107803 }
107804
107805 /*
107806@@ -313,15 +317,15 @@ static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)
107807
107808 /* Not enough space: must allocate a new page */
107809 if (!b) {
107810- b = slob_new_pages(gfp & ~__GFP_ZERO, 0, node);
107811- if (!b)
107812+ sp = slob_new_pages(gfp & ~__GFP_ZERO, 0, node);
107813+ if (!sp)
107814 return NULL;
107815- sp = virt_to_page(b);
107816- __SetPageSlab(sp);
107817+ b = page_address(sp);
107818
107819 spin_lock_irqsave(&slob_lock, flags);
107820 sp->units = SLOB_UNITS(PAGE_SIZE);
107821 sp->freelist = b;
107822+ sp->private = 0;
107823 INIT_LIST_HEAD(&sp->lru);
107824 set_slob(b, SLOB_UNITS(PAGE_SIZE), b + SLOB_UNITS(PAGE_SIZE));
107825 set_slob_page_free(sp, slob_list);
107826@@ -337,7 +341,7 @@ static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)
107827 /*
107828 * slob_free: entry point into the slob allocator.
107829 */
107830-static void slob_free(void *block, int size)
107831+static void slob_free(struct kmem_cache *c, void *block, int size)
107832 {
107833 struct page *sp;
107834 slob_t *prev, *next, *b = (slob_t *)block;
107835@@ -359,12 +363,15 @@ static void slob_free(void *block, int size)
107836 if (slob_page_free(sp))
107837 clear_slob_page_free(sp);
107838 spin_unlock_irqrestore(&slob_lock, flags);
107839- __ClearPageSlab(sp);
107840- page_mapcount_reset(sp);
107841- slob_free_pages(b, 0);
107842+ slob_free_pages(sp, 0);
107843 return;
107844 }
107845
107846+#ifdef CONFIG_PAX_MEMORY_SANITIZE
107847+ if (pax_sanitize_slab && !(c && (c->flags & SLAB_NO_SANITIZE)))
107848+ memset(block, PAX_MEMORY_SANITIZE_VALUE, size);
107849+#endif
107850+
107851 if (!slob_page_free(sp)) {
107852 /* This slob page is about to become partially free. Easy! */
107853 sp->units = units;
107854@@ -424,11 +431,10 @@ out:
107855 */
107856
107857 static __always_inline void *
107858-__do_kmalloc_node(size_t size, gfp_t gfp, int node, unsigned long caller)
107859+__do_kmalloc_node_align(size_t size, gfp_t gfp, int node, unsigned long caller, int align)
107860 {
107861- unsigned int *m;
107862- int align = max_t(size_t, ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
107863- void *ret;
107864+ slob_t *m;
107865+ void *ret = NULL;
107866
107867 gfp &= gfp_allowed_mask;
107868
107869@@ -442,27 +448,45 @@ __do_kmalloc_node(size_t size, gfp_t gfp, int node, unsigned long caller)
107870
107871 if (!m)
107872 return NULL;
107873- *m = size;
107874+ BUILD_BUG_ON(ARCH_KMALLOC_MINALIGN < 2 * SLOB_UNIT);
107875+ BUILD_BUG_ON(ARCH_SLAB_MINALIGN < 2 * SLOB_UNIT);
107876+ m[0].units = size;
107877+ m[1].units = align;
107878 ret = (void *)m + align;
107879
107880 trace_kmalloc_node(caller, ret,
107881 size, size + align, gfp, node);
107882 } else {
107883 unsigned int order = get_order(size);
107884+ struct page *page;
107885
107886 if (likely(order))
107887 gfp |= __GFP_COMP;
107888- ret = slob_new_pages(gfp, order, node);
107889+ page = slob_new_pages(gfp, order, node);
107890+ if (page) {
107891+ ret = page_address(page);
107892+ page->private = size;
107893+ }
107894
107895 trace_kmalloc_node(caller, ret,
107896 size, PAGE_SIZE << order, gfp, node);
107897 }
107898
107899- kmemleak_alloc(ret, size, 1, gfp);
107900 return ret;
107901 }
107902
107903-void *__kmalloc(size_t size, gfp_t gfp)
107904+static __always_inline void *
107905+__do_kmalloc_node(size_t size, gfp_t gfp, int node, unsigned long caller)
107906+{
107907+ int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
107908+ void *ret = __do_kmalloc_node_align(size, gfp, node, caller, align);
107909+
107910+ if (!ZERO_OR_NULL_PTR(ret))
107911+ kmemleak_alloc(ret, size, 1, gfp);
107912+ return ret;
107913+}
107914+
107915+void * __size_overflow(1) __kmalloc(size_t size, gfp_t gfp)
107916 {
107917 return __do_kmalloc_node(size, gfp, NUMA_NO_NODE, _RET_IP_);
107918 }
107919@@ -491,34 +515,112 @@ void kfree(const void *block)
107920 return;
107921 kmemleak_free(block);
107922
107923+ VM_BUG_ON(!virt_addr_valid(block));
107924 sp = virt_to_page(block);
107925- if (PageSlab(sp)) {
107926+ VM_BUG_ON(!PageSlab(sp));
107927+ if (!sp->private) {
107928 int align = max_t(size_t, ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
107929- unsigned int *m = (unsigned int *)(block - align);
107930- slob_free(m, *m + align);
107931- } else
107932+ slob_t *m = (slob_t *)(block - align);
107933+ slob_free(NULL, m, m[0].units + align);
107934+ } else {
107935+ __ClearPageSlab(sp);
107936+ page_mapcount_reset(sp);
107937+ sp->private = 0;
107938 __free_pages(sp, compound_order(sp));
107939+ }
107940 }
107941 EXPORT_SYMBOL(kfree);
107942
107943+bool is_usercopy_object(const void *ptr)
107944+{
107945+ if (!slab_is_available())
107946+ return false;
107947+
107948+ // PAX: TODO
107949+
107950+ return false;
107951+}
107952+
107953+#ifdef CONFIG_PAX_USERCOPY
107954+const char *check_heap_object(const void *ptr, unsigned long n)
107955+{
107956+ struct page *page;
107957+ const slob_t *free;
107958+ const void *base;
107959+ unsigned long flags;
107960+
107961+ if (ZERO_OR_NULL_PTR(ptr))
107962+ return "<null>";
107963+
107964+ if (!virt_addr_valid(ptr))
107965+ return NULL;
107966+
107967+ page = virt_to_head_page(ptr);
107968+ if (!PageSlab(page))
107969+ return NULL;
107970+
107971+ if (page->private) {
107972+ base = page;
107973+ if (base <= ptr && n <= page->private - (ptr - base))
107974+ return NULL;
107975+ return "<slob>";
107976+ }
107977+
107978+ /* some tricky double walking to find the chunk */
107979+ spin_lock_irqsave(&slob_lock, flags);
107980+ base = (void *)((unsigned long)ptr & PAGE_MASK);
107981+ free = page->freelist;
107982+
107983+ while (!slob_last(free) && (void *)free <= ptr) {
107984+ base = free + slob_units(free);
107985+ free = slob_next(free);
107986+ }
107987+
107988+ while (base < (void *)free) {
107989+ slobidx_t m = ((slob_t *)base)[0].units, align = ((slob_t *)base)[1].units;
107990+ int size = SLOB_UNIT * SLOB_UNITS(m + align);
107991+ int offset;
107992+
107993+ if (ptr < base + align)
107994+ break;
107995+
107996+ offset = ptr - base - align;
107997+ if (offset >= m) {
107998+ base += size;
107999+ continue;
108000+ }
108001+
108002+ if (n > m - offset)
108003+ break;
108004+
108005+ spin_unlock_irqrestore(&slob_lock, flags);
108006+ return NULL;
108007+ }
108008+
108009+ spin_unlock_irqrestore(&slob_lock, flags);
108010+ return "<slob>";
108011+}
108012+#endif
108013+
108014 /* can't use ksize for kmem_cache_alloc memory, only kmalloc */
108015 size_t ksize(const void *block)
108016 {
108017 struct page *sp;
108018 int align;
108019- unsigned int *m;
108020+ slob_t *m;
108021
108022 BUG_ON(!block);
108023 if (unlikely(block == ZERO_SIZE_PTR))
108024 return 0;
108025
108026 sp = virt_to_page(block);
108027- if (unlikely(!PageSlab(sp)))
108028- return PAGE_SIZE << compound_order(sp);
108029+ VM_BUG_ON(!PageSlab(sp));
108030+ if (sp->private)
108031+ return sp->private;
108032
108033 align = max_t(size_t, ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
108034- m = (unsigned int *)(block - align);
108035- return SLOB_UNITS(*m) * SLOB_UNIT;
108036+ m = (slob_t *)(block - align);
108037+ return SLOB_UNITS(m[0].units) * SLOB_UNIT;
108038 }
108039 EXPORT_SYMBOL(ksize);
108040
108041@@ -534,23 +636,33 @@ int __kmem_cache_create(struct kmem_cache *c, unsigned long flags)
108042
108043 void *slob_alloc_node(struct kmem_cache *c, gfp_t flags, int node)
108044 {
108045- void *b;
108046+ void *b = NULL;
108047
108048 flags &= gfp_allowed_mask;
108049
108050 lockdep_trace_alloc(flags);
108051
108052+#ifdef CONFIG_PAX_USERCOPY_SLABS
108053+ b = __do_kmalloc_node_align(c->size, flags, node, _RET_IP_, c->align);
108054+#else
108055 if (c->size < PAGE_SIZE) {
108056 b = slob_alloc(c->size, flags, c->align, node);
108057 trace_kmem_cache_alloc_node(_RET_IP_, b, c->object_size,
108058 SLOB_UNITS(c->size) * SLOB_UNIT,
108059 flags, node);
108060 } else {
108061- b = slob_new_pages(flags, get_order(c->size), node);
108062+ struct page *sp;
108063+
108064+ sp = slob_new_pages(flags, get_order(c->size), node);
108065+ if (sp) {
108066+ b = page_address(sp);
108067+ sp->private = c->size;
108068+ }
108069 trace_kmem_cache_alloc_node(_RET_IP_, b, c->object_size,
108070 PAGE_SIZE << get_order(c->size),
108071 flags, node);
108072 }
108073+#endif
108074
108075 if (b && c->ctor)
108076 c->ctor(b);
108077@@ -567,7 +679,7 @@ void *kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
108078 EXPORT_SYMBOL(kmem_cache_alloc);
108079
108080 #ifdef CONFIG_NUMA
108081-void *__kmalloc_node(size_t size, gfp_t gfp, int node)
108082+void * __size_overflow(1) __kmalloc_node(size_t size, gfp_t gfp, int node)
108083 {
108084 return __do_kmalloc_node(size, gfp, node, _RET_IP_);
108085 }
108086@@ -580,12 +692,16 @@ void *kmem_cache_alloc_node(struct kmem_cache *cachep, gfp_t gfp, int node)
108087 EXPORT_SYMBOL(kmem_cache_alloc_node);
108088 #endif
108089
108090-static void __kmem_cache_free(void *b, int size)
108091+static void __kmem_cache_free(struct kmem_cache *c, void *b, int size)
108092 {
108093- if (size < PAGE_SIZE)
108094- slob_free(b, size);
108095+ struct page *sp;
108096+
108097+ sp = virt_to_page(b);
108098+ BUG_ON(!PageSlab(sp));
108099+ if (!sp->private)
108100+ slob_free(c, b, size);
108101 else
108102- slob_free_pages(b, get_order(size));
108103+ slob_free_pages(sp, get_order(size));
108104 }
108105
108106 static void kmem_rcu_free(struct rcu_head *head)
108107@@ -593,22 +709,36 @@ static void kmem_rcu_free(struct rcu_head *head)
108108 struct slob_rcu *slob_rcu = (struct slob_rcu *)head;
108109 void *b = (void *)slob_rcu - (slob_rcu->size - sizeof(struct slob_rcu));
108110
108111- __kmem_cache_free(b, slob_rcu->size);
108112+ __kmem_cache_free(NULL, b, slob_rcu->size);
108113 }
108114
108115 void kmem_cache_free(struct kmem_cache *c, void *b)
108116 {
108117+ int size = c->size;
108118+
108119+#ifdef CONFIG_PAX_USERCOPY_SLABS
108120+ if (size + c->align < PAGE_SIZE) {
108121+ size += c->align;
108122+ b -= c->align;
108123+ }
108124+#endif
108125+
108126 kmemleak_free_recursive(b, c->flags);
108127 if (unlikely(c->flags & SLAB_DESTROY_BY_RCU)) {
108128 struct slob_rcu *slob_rcu;
108129- slob_rcu = b + (c->size - sizeof(struct slob_rcu));
108130- slob_rcu->size = c->size;
108131+ slob_rcu = b + (size - sizeof(struct slob_rcu));
108132+ slob_rcu->size = size;
108133 call_rcu(&slob_rcu->head, kmem_rcu_free);
108134 } else {
108135- __kmem_cache_free(b, c->size);
108136+ __kmem_cache_free(c, b, size);
108137 }
108138
108139+#ifdef CONFIG_PAX_USERCOPY_SLABS
108140+ trace_kfree(_RET_IP_, b);
108141+#else
108142 trace_kmem_cache_free(_RET_IP_, b);
108143+#endif
108144+
108145 }
108146 EXPORT_SYMBOL(kmem_cache_free);
108147
108148diff --git a/mm/slub.c b/mm/slub.c
108149index 82c4737..55c316a 100644
108150--- a/mm/slub.c
108151+++ b/mm/slub.c
108152@@ -198,7 +198,7 @@ struct track {
108153
108154 enum track_item { TRACK_ALLOC, TRACK_FREE };
108155
108156-#ifdef CONFIG_SYSFS
108157+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
108158 static int sysfs_slab_add(struct kmem_cache *);
108159 static int sysfs_slab_alias(struct kmem_cache *, const char *);
108160 static void memcg_propagate_slab_attrs(struct kmem_cache *s);
108161@@ -556,7 +556,7 @@ static void print_track(const char *s, struct track *t)
108162 if (!t->addr)
108163 return;
108164
108165- pr_err("INFO: %s in %pS age=%lu cpu=%u pid=%d\n",
108166+ pr_err("INFO: %s in %pA age=%lu cpu=%u pid=%d\n",
108167 s, (void *)t->addr, jiffies - t->when, t->cpu, t->pid);
108168 #ifdef CONFIG_STACKTRACE
108169 {
108170@@ -2709,6 +2709,14 @@ static __always_inline void slab_free(struct kmem_cache *s,
108171
108172 slab_free_hook(s, x);
108173
108174+#ifdef CONFIG_PAX_MEMORY_SANITIZE
108175+ if (!(s->flags & SLAB_NO_SANITIZE)) {
108176+ memset(x, PAX_MEMORY_SANITIZE_VALUE, s->object_size);
108177+ if (s->ctor)
108178+ s->ctor(x);
108179+ }
108180+#endif
108181+
108182 redo:
108183 /*
108184 * Determine the currently cpus per cpu slab.
108185@@ -3050,6 +3058,9 @@ static int calculate_sizes(struct kmem_cache *s, int forced_order)
108186 s->inuse = size;
108187
108188 if (((flags & (SLAB_DESTROY_BY_RCU | SLAB_POISON)) ||
108189+#ifdef CONFIG_PAX_MEMORY_SANITIZE
108190+ (!(flags & SLAB_NO_SANITIZE)) ||
108191+#endif
108192 s->ctor)) {
108193 /*
108194 * Relocate free pointer after the object if it is not
108195@@ -3304,7 +3315,7 @@ static int __init setup_slub_min_objects(char *str)
108196
108197 __setup("slub_min_objects=", setup_slub_min_objects);
108198
108199-void *__kmalloc(size_t size, gfp_t flags)
108200+void * __size_overflow(1) __kmalloc(size_t size, gfp_t flags)
108201 {
108202 struct kmem_cache *s;
108203 void *ret;
108204@@ -3342,7 +3353,7 @@ static void *kmalloc_large_node(size_t size, gfp_t flags, int node)
108205 return ptr;
108206 }
108207
108208-void *__kmalloc_node(size_t size, gfp_t flags, int node)
108209+void * __size_overflow(1) __kmalloc_node(size_t size, gfp_t flags, int node)
108210 {
108211 struct kmem_cache *s;
108212 void *ret;
108213@@ -3390,6 +3401,59 @@ static size_t __ksize(const void *object)
108214 return slab_ksize(page->slab_cache);
108215 }
108216
108217+bool is_usercopy_object(const void *ptr)
108218+{
108219+ struct page *page;
108220+ struct kmem_cache *s;
108221+
108222+ if (ZERO_OR_NULL_PTR(ptr))
108223+ return false;
108224+
108225+ if (!slab_is_available())
108226+ return false;
108227+
108228+ if (!virt_addr_valid(ptr))
108229+ return false;
108230+
108231+ page = virt_to_head_page(ptr);
108232+
108233+ if (!PageSlab(page))
108234+ return false;
108235+
108236+ s = page->slab_cache;
108237+ return s->flags & SLAB_USERCOPY;
108238+}
108239+
108240+#ifdef CONFIG_PAX_USERCOPY
108241+const char *check_heap_object(const void *ptr, unsigned long n)
108242+{
108243+ struct page *page;
108244+ struct kmem_cache *s;
108245+ unsigned long offset;
108246+
108247+ if (ZERO_OR_NULL_PTR(ptr))
108248+ return "<null>";
108249+
108250+ if (!virt_addr_valid(ptr))
108251+ return NULL;
108252+
108253+ page = virt_to_head_page(ptr);
108254+
108255+ if (!PageSlab(page))
108256+ return NULL;
108257+
108258+ s = page->slab_cache;
108259+ if (!(s->flags & SLAB_USERCOPY))
108260+ return s->name;
108261+
108262+ offset = (ptr - page_address(page)) % s->size;
108263+ if (offset <= s->object_size && n <= s->object_size - offset)
108264+ return NULL;
108265+
108266+ return s->name;
108267+}
108268+#endif
108269+
108270 size_t ksize(const void *object)
108271 {
108272 size_t size = __ksize(object);
108273@@ -3410,6 +3474,7 @@ void kfree(const void *x)
108274 if (unlikely(ZERO_OR_NULL_PTR(x)))
108275 return;
108276
108277+ VM_BUG_ON(!virt_addr_valid(x));
108278 page = virt_to_head_page(x);
108279 if (unlikely(!PageSlab(page))) {
108280 BUG_ON(!PageCompound(page));
108281@@ -3726,7 +3791,7 @@ __kmem_cache_alias(const char *name, size_t size, size_t align,
108282
108283 s = find_mergeable(size, align, flags, name, ctor);
108284 if (s) {
108285- s->refcount++;
108286+ atomic_inc(&s->refcount);
108287
108288 /*
108289 * Adjust the object sizes so that we clear
108290@@ -3742,7 +3807,7 @@ __kmem_cache_alias(const char *name, size_t size, size_t align,
108291 }
108292
108293 if (sysfs_slab_alias(s, name)) {
108294- s->refcount--;
108295+ atomic_dec(&s->refcount);
108296 s = NULL;
108297 }
108298 }
108299@@ -3859,7 +3924,7 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
108300 }
108301 #endif
108302
108303-#ifdef CONFIG_SYSFS
108304+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
108305 static int count_inuse(struct page *page)
108306 {
108307 return page->inuse;
108308@@ -4140,7 +4205,11 @@ static int list_locations(struct kmem_cache *s, char *buf,
108309 len += sprintf(buf + len, "%7ld ", l->count);
108310
108311 if (l->addr)
108312+#ifdef CONFIG_GRKERNSEC_HIDESYM
108313+ len += sprintf(buf + len, "%pS", NULL);
108314+#else
108315 len += sprintf(buf + len, "%pS", (void *)l->addr);
108316+#endif
108317 else
108318 len += sprintf(buf + len, "<not-available>");
108319
108320@@ -4238,12 +4307,12 @@ static void __init resiliency_test(void)
108321 validate_slab_cache(kmalloc_caches[9]);
108322 }
108323 #else
108324-#ifdef CONFIG_SYSFS
108325+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
108326 static void resiliency_test(void) {};
108327 #endif
108328 #endif
108329
108330-#ifdef CONFIG_SYSFS
108331+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
108332 enum slab_stat_type {
108333 SL_ALL, /* All slabs */
108334 SL_PARTIAL, /* Only partially allocated slabs */
108335@@ -4480,13 +4549,17 @@ static ssize_t ctor_show(struct kmem_cache *s, char *buf)
108336 {
108337 if (!s->ctor)
108338 return 0;
108339+#ifdef CONFIG_GRKERNSEC_HIDESYM
108340+ return sprintf(buf, "%pS\n", NULL);
108341+#else
108342 return sprintf(buf, "%pS\n", s->ctor);
108343+#endif
108344 }
108345 SLAB_ATTR_RO(ctor);
108346
108347 static ssize_t aliases_show(struct kmem_cache *s, char *buf)
108348 {
108349- return sprintf(buf, "%d\n", s->refcount < 0 ? 0 : s->refcount - 1);
108350+ return sprintf(buf, "%d\n", atomic_read(&s->refcount) < 0 ? 0 : atomic_read(&s->refcount) - 1);
108351 }
108352 SLAB_ATTR_RO(aliases);
108353
108354@@ -4574,6 +4647,22 @@ static ssize_t cache_dma_show(struct kmem_cache *s, char *buf)
108355 SLAB_ATTR_RO(cache_dma);
108356 #endif
108357
108358+#ifdef CONFIG_PAX_USERCOPY_SLABS
108359+static ssize_t usercopy_show(struct kmem_cache *s, char *buf)
108360+{
108361+ return sprintf(buf, "%d\n", !!(s->flags & SLAB_USERCOPY));
108362+}
108363+SLAB_ATTR_RO(usercopy);
108364+#endif
108365+
108366+#ifdef CONFIG_PAX_MEMORY_SANITIZE
108367+static ssize_t sanitize_show(struct kmem_cache *s, char *buf)
108368+{
108369+ return sprintf(buf, "%d\n", !(s->flags & SLAB_NO_SANITIZE));
108370+}
108371+SLAB_ATTR_RO(sanitize);
108372+#endif
108373+
108374 static ssize_t destroy_by_rcu_show(struct kmem_cache *s, char *buf)
108375 {
108376 return sprintf(buf, "%d\n", !!(s->flags & SLAB_DESTROY_BY_RCU));
108377@@ -4629,7 +4718,7 @@ static ssize_t trace_store(struct kmem_cache *s, const char *buf,
108378 * as well as cause other issues like converting a mergeable
108379 * cache into an umergeable one.
108380 */
108381- if (s->refcount > 1)
108382+ if (atomic_read(&s->refcount) > 1)
108383 return -EINVAL;
108384
108385 s->flags &= ~SLAB_TRACE;
108386@@ -4749,7 +4838,7 @@ static ssize_t failslab_show(struct kmem_cache *s, char *buf)
108387 static ssize_t failslab_store(struct kmem_cache *s, const char *buf,
108388 size_t length)
108389 {
108390- if (s->refcount > 1)
108391+ if (atomic_read(&s->refcount) > 1)
108392 return -EINVAL;
108393
108394 s->flags &= ~SLAB_FAILSLAB;
108395@@ -4916,6 +5005,12 @@ static struct attribute *slab_attrs[] = {
108396 #ifdef CONFIG_ZONE_DMA
108397 &cache_dma_attr.attr,
108398 #endif
108399+#ifdef CONFIG_PAX_USERCOPY_SLABS
108400+ &usercopy_attr.attr,
108401+#endif
108402+#ifdef CONFIG_PAX_MEMORY_SANITIZE
108403+ &sanitize_attr.attr,
108404+#endif
108405 #ifdef CONFIG_NUMA
108406 &remote_node_defrag_ratio_attr.attr,
108407 #endif
108408@@ -5157,6 +5252,7 @@ static char *create_unique_id(struct kmem_cache *s)
108409 return name;
108410 }
108411
108412+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
108413 static int sysfs_slab_add(struct kmem_cache *s)
108414 {
108415 int err;
108416@@ -5230,6 +5326,7 @@ void sysfs_slab_remove(struct kmem_cache *s)
108417 kobject_del(&s->kobj);
108418 kobject_put(&s->kobj);
108419 }
108420+#endif
108421
108422 /*
108423 * Need to buffer aliases during bootup until sysfs becomes
108424@@ -5243,6 +5340,7 @@ struct saved_alias {
108425
108426 static struct saved_alias *alias_list;
108427
108428+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
108429 static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
108430 {
108431 struct saved_alias *al;
108432@@ -5265,6 +5363,7 @@ static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
108433 alias_list = al;
108434 return 0;
108435 }
108436+#endif
108437
108438 static int __init slab_sysfs_init(void)
108439 {
108440diff --git a/mm/sparse-vmemmap.c b/mm/sparse-vmemmap.c
108441index 4cba9c2..b4f9fcc 100644
108442--- a/mm/sparse-vmemmap.c
108443+++ b/mm/sparse-vmemmap.c
108444@@ -131,7 +131,7 @@ pud_t * __meminit vmemmap_pud_populate(pgd_t *pgd, unsigned long addr, int node)
108445 void *p = vmemmap_alloc_block(PAGE_SIZE, node);
108446 if (!p)
108447 return NULL;
108448- pud_populate(&init_mm, pud, p);
108449+ pud_populate_kernel(&init_mm, pud, p);
108450 }
108451 return pud;
108452 }
108453@@ -143,7 +143,7 @@ pgd_t * __meminit vmemmap_pgd_populate(unsigned long addr, int node)
108454 void *p = vmemmap_alloc_block(PAGE_SIZE, node);
108455 if (!p)
108456 return NULL;
108457- pgd_populate(&init_mm, pgd, p);
108458+ pgd_populate_kernel(&init_mm, pgd, p);
108459 }
108460 return pgd;
108461 }
108462diff --git a/mm/sparse.c b/mm/sparse.c
108463index d1b48b6..6e8590e 100644
108464--- a/mm/sparse.c
108465+++ b/mm/sparse.c
108466@@ -750,7 +750,7 @@ static void clear_hwpoisoned_pages(struct page *memmap, int nr_pages)
108467
108468 for (i = 0; i < PAGES_PER_SECTION; i++) {
108469 if (PageHWPoison(&memmap[i])) {
108470- atomic_long_sub(1, &num_poisoned_pages);
108471+ atomic_long_sub_unchecked(1, &num_poisoned_pages);
108472 ClearPageHWPoison(&memmap[i]);
108473 }
108474 }
108475diff --git a/mm/swap.c b/mm/swap.c
108476index cd3a5e6..40c0c8f 100644
108477--- a/mm/swap.c
108478+++ b/mm/swap.c
108479@@ -31,6 +31,7 @@
108480 #include <linux/memcontrol.h>
108481 #include <linux/gfp.h>
108482 #include <linux/uio.h>
108483+#include <linux/hugetlb.h>
108484
108485 #include "internal.h"
108486
108487@@ -77,6 +78,8 @@ static void __put_compound_page(struct page *page)
108488
108489 __page_cache_release(page);
108490 dtor = get_compound_page_dtor(page);
108491+ if (!PageHuge(page))
108492+ BUG_ON(dtor != free_compound_page);
108493 (*dtor)(page);
108494 }
108495
108496diff --git a/mm/swapfile.c b/mm/swapfile.c
108497index 63f55cc..31874e6 100644
108498--- a/mm/swapfile.c
108499+++ b/mm/swapfile.c
108500@@ -84,7 +84,7 @@ static DEFINE_MUTEX(swapon_mutex);
108501
108502 static DECLARE_WAIT_QUEUE_HEAD(proc_poll_wait);
108503 /* Activity counter to indicate that a swapon or swapoff has occurred */
108504-static atomic_t proc_poll_event = ATOMIC_INIT(0);
108505+static atomic_unchecked_t proc_poll_event = ATOMIC_INIT(0);
108506
108507 static inline unsigned char swap_count(unsigned char ent)
108508 {
108509@@ -1944,7 +1944,7 @@ SYSCALL_DEFINE1(swapoff, const char __user *, specialfile)
108510 spin_unlock(&swap_lock);
108511
108512 err = 0;
108513- atomic_inc(&proc_poll_event);
108514+ atomic_inc_unchecked(&proc_poll_event);
108515 wake_up_interruptible(&proc_poll_wait);
108516
108517 out_dput:
108518@@ -1961,8 +1961,8 @@ static unsigned swaps_poll(struct file *file, poll_table *wait)
108519
108520 poll_wait(file, &proc_poll_wait, wait);
108521
108522- if (seq->poll_event != atomic_read(&proc_poll_event)) {
108523- seq->poll_event = atomic_read(&proc_poll_event);
108524+ if (seq->poll_event != atomic_read_unchecked(&proc_poll_event)) {
108525+ seq->poll_event = atomic_read_unchecked(&proc_poll_event);
108526 return POLLIN | POLLRDNORM | POLLERR | POLLPRI;
108527 }
108528
108529@@ -2060,7 +2060,7 @@ static int swaps_open(struct inode *inode, struct file *file)
108530 return ret;
108531
108532 seq = file->private_data;
108533- seq->poll_event = atomic_read(&proc_poll_event);
108534+ seq->poll_event = atomic_read_unchecked(&proc_poll_event);
108535 return 0;
108536 }
108537
108538@@ -2520,7 +2520,7 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
108539 (frontswap_map) ? "FS" : "");
108540
108541 mutex_unlock(&swapon_mutex);
108542- atomic_inc(&proc_poll_event);
108543+ atomic_inc_unchecked(&proc_poll_event);
108544 wake_up_interruptible(&proc_poll_wait);
108545
108546 if (S_ISREG(inode->i_mode))
108547diff --git a/mm/util.c b/mm/util.c
108548index 3981ae9..28b585b 100644
108549--- a/mm/util.c
108550+++ b/mm/util.c
108551@@ -233,6 +233,12 @@ struct task_struct *task_of_stack(struct task_struct *task,
108552 void arch_pick_mmap_layout(struct mm_struct *mm)
108553 {
108554 mm->mmap_base = TASK_UNMAPPED_BASE;
108555+
108556+#ifdef CONFIG_PAX_RANDMMAP
108557+ if (mm->pax_flags & MF_PAX_RANDMMAP)
108558+ mm->mmap_base += mm->delta_mmap;
108559+#endif
108560+
108561 mm->get_unmapped_area = arch_get_unmapped_area;
108562 }
108563 #endif
108564@@ -403,6 +409,9 @@ int get_cmdline(struct task_struct *task, char *buffer, int buflen)
108565 if (!mm->arg_end)
108566 goto out_mm; /* Shh! No looking before we're done */
108567
108568+ if (gr_acl_handle_procpidmem(task))
108569+ goto out_mm;
108570+
108571 len = mm->arg_end - mm->arg_start;
108572
108573 if (len > buflen)
108574diff --git a/mm/vmalloc.c b/mm/vmalloc.c
108575index 49abccf..7bd1931 100644
108576--- a/mm/vmalloc.c
108577+++ b/mm/vmalloc.c
108578@@ -39,20 +39,65 @@ struct vfree_deferred {
108579 struct work_struct wq;
108580 };
108581 static DEFINE_PER_CPU(struct vfree_deferred, vfree_deferred);
108582+static DEFINE_PER_CPU(struct vfree_deferred, vunmap_deferred);
108583+
108584+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
108585+struct stack_deferred_llist {
108586+ struct llist_head list;
108587+ void *stack;
108588+ void *lowmem_stack;
108589+};
108590+
108591+struct stack_deferred {
108592+ struct stack_deferred_llist list;
108593+ struct work_struct wq;
108594+};
108595+
108596+static DEFINE_PER_CPU(struct stack_deferred, stack_deferred);
108597+#endif
108598
108599 static void __vunmap(const void *, int);
108600
108601-static void free_work(struct work_struct *w)
108602+static void vfree_work(struct work_struct *w)
108603 {
108604 struct vfree_deferred *p = container_of(w, struct vfree_deferred, wq);
108605 struct llist_node *llnode = llist_del_all(&p->list);
108606 while (llnode) {
108607- void *p = llnode;
108608+ void *x = llnode;
108609 llnode = llist_next(llnode);
108610- __vunmap(p, 1);
108611+ __vunmap(x, 1);
108612 }
108613 }
108614
108615+static void vunmap_work(struct work_struct *w)
108616+{
108617+ struct vfree_deferred *p = container_of(w, struct vfree_deferred, wq);
108618+ struct llist_node *llnode = llist_del_all(&p->list);
108619+ while (llnode) {
108620+ void *x = llnode;
108621+ llnode = llist_next(llnode);
108622+ __vunmap(x, 0);
108623+ }
108624+}
108625+
108626+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
108627+static void unmap_work(struct work_struct *w)
108628+{
108629+ struct stack_deferred *p = container_of(w, struct stack_deferred, wq);
108630+ struct llist_node *llnode = llist_del_all(&p->list.list);
108631+ while (llnode) {
108632+ struct stack_deferred_llist *x =
108633+ llist_entry((struct llist_head *)llnode,
108634+ struct stack_deferred_llist, list);
108635+ void *stack = ACCESS_ONCE(x->stack);
108636+ void *lowmem_stack = ACCESS_ONCE(x->lowmem_stack);
108637+ llnode = llist_next(llnode);
108638+ __vunmap(stack, 0);
108639+ free_kmem_pages((unsigned long)lowmem_stack, THREAD_SIZE_ORDER);
108640+ }
108641+}
108642+#endif
108643+
108644 /*** Page table manipulation functions ***/
108645
108646 static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end)
108647@@ -61,8 +106,19 @@ static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end)
108648
108649 pte = pte_offset_kernel(pmd, addr);
108650 do {
108651- pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
108652- WARN_ON(!pte_none(ptent) && !pte_present(ptent));
108653+
108654+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
108655+ if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr < (unsigned long)MODULES_EXEC_END) {
108656+ BUG_ON(!pte_exec(*pte));
108657+ set_pte_at(&init_mm, addr, pte, pfn_pte(__pa(addr) >> PAGE_SHIFT, PAGE_KERNEL_EXEC));
108658+ continue;
108659+ }
108660+#endif
108661+
108662+ {
108663+ pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
108664+ WARN_ON(!pte_none(ptent) && !pte_present(ptent));
108665+ }
108666 } while (pte++, addr += PAGE_SIZE, addr != end);
108667 }
108668
108669@@ -122,16 +178,29 @@ static int vmap_pte_range(pmd_t *pmd, unsigned long addr,
108670 pte = pte_alloc_kernel(pmd, addr);
108671 if (!pte)
108672 return -ENOMEM;
108673+
108674+ pax_open_kernel();
108675 do {
108676 struct page *page = pages[*nr];
108677
108678- if (WARN_ON(!pte_none(*pte)))
108679+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
108680+ if (pgprot_val(prot) & _PAGE_NX)
108681+#endif
108682+
108683+ if (!pte_none(*pte)) {
108684+ pax_close_kernel();
108685+ WARN_ON(1);
108686 return -EBUSY;
108687- if (WARN_ON(!page))
108688+ }
108689+ if (!page) {
108690+ pax_close_kernel();
108691+ WARN_ON(1);
108692 return -ENOMEM;
108693+ }
108694 set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
108695 (*nr)++;
108696 } while (pte++, addr += PAGE_SIZE, addr != end);
108697+ pax_close_kernel();
108698 return 0;
108699 }
108700
108701@@ -141,7 +210,7 @@ static int vmap_pmd_range(pud_t *pud, unsigned long addr,
108702 pmd_t *pmd;
108703 unsigned long next;
108704
108705- pmd = pmd_alloc(&init_mm, pud, addr);
108706+ pmd = pmd_alloc_kernel(&init_mm, pud, addr);
108707 if (!pmd)
108708 return -ENOMEM;
108709 do {
108710@@ -158,7 +227,7 @@ static int vmap_pud_range(pgd_t *pgd, unsigned long addr,
108711 pud_t *pud;
108712 unsigned long next;
108713
108714- pud = pud_alloc(&init_mm, pgd, addr);
108715+ pud = pud_alloc_kernel(&init_mm, pgd, addr);
108716 if (!pud)
108717 return -ENOMEM;
108718 do {
108719@@ -218,6 +287,12 @@ int is_vmalloc_or_module_addr(const void *x)
108720 if (addr >= MODULES_VADDR && addr < MODULES_END)
108721 return 1;
108722 #endif
108723+
108724+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
108725+ if (x >= (const void *)MODULES_EXEC_VADDR && x < (const void *)MODULES_EXEC_END)
108726+ return 1;
108727+#endif
108728+
108729 return is_vmalloc_addr(x);
108730 }
108731
108732@@ -238,8 +313,14 @@ struct page *vmalloc_to_page(const void *vmalloc_addr)
108733
108734 if (!pgd_none(*pgd)) {
108735 pud_t *pud = pud_offset(pgd, addr);
108736+#ifdef CONFIG_X86
108737+ if (!pud_large(*pud))
108738+#endif
108739 if (!pud_none(*pud)) {
108740 pmd_t *pmd = pmd_offset(pud, addr);
108741+#ifdef CONFIG_X86
108742+ if (!pmd_large(*pmd))
108743+#endif
108744 if (!pmd_none(*pmd)) {
108745 pte_t *ptep, pte;
108746
108747@@ -341,7 +422,7 @@ static void purge_vmap_area_lazy(void);
108748 * Allocate a region of KVA of the specified size and alignment, within the
108749 * vstart and vend.
108750 */
108751-static struct vmap_area *alloc_vmap_area(unsigned long size,
108752+static struct vmap_area * __size_overflow(1) alloc_vmap_area(unsigned long size,
108753 unsigned long align,
108754 unsigned long vstart, unsigned long vend,
108755 int node, gfp_t gfp_mask)
108756@@ -1182,13 +1263,27 @@ void __init vmalloc_init(void)
108757 for_each_possible_cpu(i) {
108758 struct vmap_block_queue *vbq;
108759 struct vfree_deferred *p;
108760+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
108761+ struct stack_deferred *p2;
108762+#endif
108763
108764 vbq = &per_cpu(vmap_block_queue, i);
108765 spin_lock_init(&vbq->lock);
108766 INIT_LIST_HEAD(&vbq->free);
108767+
108768 p = &per_cpu(vfree_deferred, i);
108769 init_llist_head(&p->list);
108770- INIT_WORK(&p->wq, free_work);
108771+ INIT_WORK(&p->wq, vfree_work);
108772+
108773+ p = &per_cpu(vunmap_deferred, i);
108774+ init_llist_head(&p->list);
108775+ INIT_WORK(&p->wq, vunmap_work);
108776+
108777+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
108778+ p2 = &per_cpu(stack_deferred, i);
108779+ init_llist_head(&p2->list.list);
108780+ INIT_WORK(&p2->wq, unmap_work);
108781+#endif
108782 }
108783
108784 /* Import existing vmlist entries. */
108785@@ -1313,6 +1408,16 @@ static struct vm_struct *__get_vm_area_node(unsigned long size,
108786 struct vm_struct *area;
108787
108788 BUG_ON(in_interrupt());
108789+
108790+#if defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
108791+ if (flags & VM_KERNEXEC) {
108792+ if (start != VMALLOC_START || end != VMALLOC_END)
108793+ return NULL;
108794+ start = (unsigned long)MODULES_EXEC_VADDR;
108795+ end = (unsigned long)MODULES_EXEC_END;
108796+ }
108797+#endif
108798+
108799 if (flags & VM_IOREMAP)
108800 align = 1ul << clamp(fls(size), PAGE_SHIFT, IOREMAP_MAX_ORDER);
108801
108802@@ -1510,13 +1615,36 @@ EXPORT_SYMBOL(vfree);
108803 */
108804 void vunmap(const void *addr)
108805 {
108806- BUG_ON(in_interrupt());
108807- might_sleep();
108808- if (addr)
108809+ if (!addr)
108810+ return;
108811+ if (unlikely(in_interrupt())) {
108812+ struct vfree_deferred *p = this_cpu_ptr(&vunmap_deferred);
108813+ if (llist_add((struct llist_node *)addr, &p->list))
108814+ schedule_work(&p->wq);
108815+ } else {
108816+ might_sleep();
108817 __vunmap(addr, 0);
108818+ }
108819 }
108820 EXPORT_SYMBOL(vunmap);
108821
108822+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
108823+void unmap_process_stacks(struct task_struct *task)
108824+{
108825+ if (unlikely(in_interrupt())) {
108826+ struct stack_deferred *p = this_cpu_ptr(&stack_deferred);
108827+ struct stack_deferred_llist *list = task->stack;
108828+ list->stack = task->stack;
108829+ list->lowmem_stack = task->lowmem_stack;
108830+ if (llist_add((struct llist_node *)&list->list, &p->list.list))
108831+ schedule_work(&p->wq);
108832+ } else {
108833+ __vunmap(task->stack, 0);
108834+ free_kmem_pages((unsigned long)task->lowmem_stack, THREAD_SIZE_ORDER);
108835+ }
108836+}
108837+#endif
108838+
108839 /**
108840 * vmap - map an array of pages into virtually contiguous space
108841 * @pages: array of page pointers
108842@@ -1537,6 +1665,11 @@ void *vmap(struct page **pages, unsigned int count,
108843 if (count > totalram_pages)
108844 return NULL;
108845
108846+#if defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
108847+ if (!(pgprot_val(prot) & _PAGE_NX))
108848+ flags |= VM_KERNEXEC;
108849+#endif
108850+
108851 area = get_vm_area_caller((count << PAGE_SHIFT), flags,
108852 __builtin_return_address(0));
108853 if (!area)
108854@@ -1641,6 +1774,14 @@ void *__vmalloc_node_range(unsigned long size, unsigned long align,
108855 if (!size || (size >> PAGE_SHIFT) > totalram_pages)
108856 goto fail;
108857
108858+#if defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
108859+ if (!(pgprot_val(prot) & _PAGE_NX)) {
108860+ vm_flags |= VM_KERNEXEC;
108861+ start = VMALLOC_START;
108862+ end = VMALLOC_END;
108863+ }
108864+#endif
108865+
108866 area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNINITIALIZED |
108867 vm_flags, start, end, node, gfp_mask, caller);
108868 if (!area)
108869@@ -1817,10 +1958,9 @@ EXPORT_SYMBOL(vzalloc_node);
108870 * For tight control over page level allocator and protection flags
108871 * use __vmalloc() instead.
108872 */
108873-
108874 void *vmalloc_exec(unsigned long size)
108875 {
108876- return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
108877+ return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL_EXEC,
108878 NUMA_NO_NODE, __builtin_return_address(0));
108879 }
108880
108881@@ -2127,6 +2267,8 @@ int remap_vmalloc_range_partial(struct vm_area_struct *vma, unsigned long uaddr,
108882 {
108883 struct vm_struct *area;
108884
108885+ BUG_ON(vma->vm_mirror);
108886+
108887 size = PAGE_ALIGN(size);
108888
108889 if (!PAGE_ALIGNED(uaddr) || !PAGE_ALIGNED(kaddr))
108890@@ -2609,7 +2751,11 @@ static int s_show(struct seq_file *m, void *p)
108891 v->addr, v->addr + v->size, v->size);
108892
108893 if (v->caller)
108894+#ifdef CONFIG_GRKERNSEC_HIDESYM
108895+ seq_printf(m, " %pK", v->caller);
108896+#else
108897 seq_printf(m, " %pS", v->caller);
108898+#endif
108899
108900 if (v->nr_pages)
108901 seq_printf(m, " pages=%d", v->nr_pages);
108902diff --git a/mm/vmstat.c b/mm/vmstat.c
108903index 4f5cd97..9fb715a 100644
108904--- a/mm/vmstat.c
108905+++ b/mm/vmstat.c
108906@@ -27,6 +27,7 @@
108907 #include <linux/mm_inline.h>
108908 #include <linux/page_ext.h>
108909 #include <linux/page_owner.h>
108910+#include <linux/grsecurity.h>
108911
108912 #include "internal.h"
108913
108914@@ -86,7 +87,7 @@ void vm_events_fold_cpu(int cpu)
108915 *
108916 * vm_stat contains the global counters
108917 */
108918-atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS] __cacheline_aligned_in_smp;
108919+atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS] __cacheline_aligned_in_smp;
108920 EXPORT_SYMBOL(vm_stat);
108921
108922 #ifdef CONFIG_SMP
108923@@ -438,7 +439,7 @@ static int fold_diff(int *diff)
108924
108925 for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
108926 if (diff[i]) {
108927- atomic_long_add(diff[i], &vm_stat[i]);
108928+ atomic_long_add_unchecked(diff[i], &vm_stat[i]);
108929 changes++;
108930 }
108931 return changes;
108932@@ -476,7 +477,7 @@ static int refresh_cpu_vm_stats(void)
108933 v = this_cpu_xchg(p->vm_stat_diff[i], 0);
108934 if (v) {
108935
108936- atomic_long_add(v, &zone->vm_stat[i]);
108937+ atomic_long_add_unchecked(v, &zone->vm_stat[i]);
108938 global_diff[i] += v;
108939 #ifdef CONFIG_NUMA
108940 /* 3 seconds idle till flush */
108941@@ -540,7 +541,7 @@ void cpu_vm_stats_fold(int cpu)
108942
108943 v = p->vm_stat_diff[i];
108944 p->vm_stat_diff[i] = 0;
108945- atomic_long_add(v, &zone->vm_stat[i]);
108946+ atomic_long_add_unchecked(v, &zone->vm_stat[i]);
108947 global_diff[i] += v;
108948 }
108949 }
108950@@ -560,8 +561,8 @@ void drain_zonestat(struct zone *zone, struct per_cpu_pageset *pset)
108951 if (pset->vm_stat_diff[i]) {
108952 int v = pset->vm_stat_diff[i];
108953 pset->vm_stat_diff[i] = 0;
108954- atomic_long_add(v, &zone->vm_stat[i]);
108955- atomic_long_add(v, &vm_stat[i]);
108956+ atomic_long_add_unchecked(v, &zone->vm_stat[i]);
108957+ atomic_long_add_unchecked(v, &vm_stat[i]);
108958 }
108959 }
108960 #endif
108961@@ -1293,10 +1294,22 @@ static void *vmstat_start(struct seq_file *m, loff_t *pos)
108962 stat_items_size += sizeof(struct vm_event_state);
108963 #endif
108964
108965- v = kmalloc(stat_items_size, GFP_KERNEL);
108966+ v = kzalloc(stat_items_size, GFP_KERNEL);
108967 m->private = v;
108968 if (!v)
108969 return ERR_PTR(-ENOMEM);
108970+
108971+#ifdef CONFIG_GRKERNSEC_PROC_ADD
108972+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
108973+ if (!uid_eq(current_uid(), GLOBAL_ROOT_UID)
108974+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
108975+ && !in_group_p(grsec_proc_gid)
108976+#endif
108977+ )
108978+ return (unsigned long *)m->private + *pos;
108979+#endif
108980+#endif
108981+
108982 for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
108983 v[i] = global_page_state(i);
108984 v += NR_VM_ZONE_STAT_ITEMS;
108985@@ -1528,10 +1541,16 @@ static int __init setup_vmstat(void)
108986 cpu_notifier_register_done();
108987 #endif
108988 #ifdef CONFIG_PROC_FS
108989- proc_create("buddyinfo", S_IRUGO, NULL, &fragmentation_file_operations);
108990- proc_create("pagetypeinfo", S_IRUGO, NULL, &pagetypeinfo_file_ops);
108991- proc_create("vmstat", S_IRUGO, NULL, &proc_vmstat_file_operations);
108992- proc_create("zoneinfo", S_IRUGO, NULL, &proc_zoneinfo_file_operations);
108993+ {
108994+ mode_t gr_mode = S_IRUGO;
108995+#ifdef CONFIG_GRKERNSEC_PROC_ADD
108996+ gr_mode = S_IRUSR;
108997+#endif
108998+ proc_create("buddyinfo", gr_mode, NULL, &fragmentation_file_operations);
108999+ proc_create("pagetypeinfo", gr_mode, NULL, &pagetypeinfo_file_ops);
109000+ proc_create("vmstat", S_IRUGO, NULL, &proc_vmstat_file_operations);
109001+ proc_create("zoneinfo", gr_mode, NULL, &proc_zoneinfo_file_operations);
109002+ }
109003 #endif
109004 return 0;
109005 }
109006diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
109007index 64c6bed..b79a5de 100644
109008--- a/net/8021q/vlan.c
109009+++ b/net/8021q/vlan.c
109010@@ -481,7 +481,7 @@ out:
109011 return NOTIFY_DONE;
109012 }
109013
109014-static struct notifier_block vlan_notifier_block __read_mostly = {
109015+static struct notifier_block vlan_notifier_block = {
109016 .notifier_call = vlan_device_event,
109017 };
109018
109019@@ -556,8 +556,7 @@ static int vlan_ioctl_handler(struct net *net, void __user *arg)
109020 err = -EPERM;
109021 if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
109022 break;
109023- if ((args.u.name_type >= 0) &&
109024- (args.u.name_type < VLAN_NAME_TYPE_HIGHEST)) {
109025+ if (args.u.name_type < VLAN_NAME_TYPE_HIGHEST) {
109026 struct vlan_net *vn;
109027
109028 vn = net_generic(net, vlan_net_id);
109029diff --git a/net/8021q/vlan_netlink.c b/net/8021q/vlan_netlink.c
109030index c92b52f..006c052 100644
109031--- a/net/8021q/vlan_netlink.c
109032+++ b/net/8021q/vlan_netlink.c
109033@@ -245,7 +245,7 @@ static struct net *vlan_get_link_net(const struct net_device *dev)
109034 return dev_net(real_dev);
109035 }
109036
109037-struct rtnl_link_ops vlan_link_ops __read_mostly = {
109038+struct rtnl_link_ops vlan_link_ops = {
109039 .kind = "vlan",
109040 .maxtype = IFLA_VLAN_MAX,
109041 .policy = vlan_policy,
109042diff --git a/net/9p/client.c b/net/9p/client.c
109043index e86a9bea..e91f70e 100644
109044--- a/net/9p/client.c
109045+++ b/net/9p/client.c
109046@@ -596,7 +596,7 @@ static int p9_check_zc_errors(struct p9_client *c, struct p9_req_t *req,
109047 len - inline_len);
109048 } else {
109049 err = copy_from_user(ename + inline_len,
109050- uidata, len - inline_len);
109051+ (char __force_user *)uidata, len - inline_len);
109052 if (err) {
109053 err = -EFAULT;
109054 goto out_err;
109055@@ -1570,7 +1570,7 @@ p9_client_read(struct p9_fid *fid, char *data, char __user *udata, u64 offset,
109056 kernel_buf = 1;
109057 indata = data;
109058 } else
109059- indata = (__force char *)udata;
109060+ indata = (__force_kernel char *)udata;
109061 /*
109062 * response header len is 11
109063 * PDU Header(7) + IO Size (4)
109064@@ -1645,7 +1645,7 @@ p9_client_write(struct p9_fid *fid, char *data, const char __user *udata,
109065 kernel_buf = 1;
109066 odata = data;
109067 } else
109068- odata = (char *)udata;
109069+ odata = (char __force_kernel *)udata;
109070 req = p9_client_zc_rpc(clnt, P9_TWRITE, NULL, odata, 0, rsize,
109071 P9_ZC_HDR_SZ, kernel_buf, "dqd",
109072 fid->fid, offset, rsize);
109073diff --git a/net/9p/mod.c b/net/9p/mod.c
109074index 6ab36ae..6f1841b 100644
109075--- a/net/9p/mod.c
109076+++ b/net/9p/mod.c
109077@@ -84,7 +84,7 @@ static LIST_HEAD(v9fs_trans_list);
109078 void v9fs_register_trans(struct p9_trans_module *m)
109079 {
109080 spin_lock(&v9fs_trans_lock);
109081- list_add_tail(&m->list, &v9fs_trans_list);
109082+ pax_list_add_tail((struct list_head *)&m->list, &v9fs_trans_list);
109083 spin_unlock(&v9fs_trans_lock);
109084 }
109085 EXPORT_SYMBOL(v9fs_register_trans);
109086@@ -97,7 +97,7 @@ EXPORT_SYMBOL(v9fs_register_trans);
109087 void v9fs_unregister_trans(struct p9_trans_module *m)
109088 {
109089 spin_lock(&v9fs_trans_lock);
109090- list_del_init(&m->list);
109091+ pax_list_del_init((struct list_head *)&m->list);
109092 spin_unlock(&v9fs_trans_lock);
109093 }
109094 EXPORT_SYMBOL(v9fs_unregister_trans);
109095diff --git a/net/9p/trans_fd.c b/net/9p/trans_fd.c
109096index 80d08f6..de63fd1 100644
109097--- a/net/9p/trans_fd.c
109098+++ b/net/9p/trans_fd.c
109099@@ -428,7 +428,7 @@ static int p9_fd_write(struct p9_client *client, void *v, int len)
109100 oldfs = get_fs();
109101 set_fs(get_ds());
109102 /* The cast to a user pointer is valid due to the set_fs() */
109103- ret = vfs_write(ts->wr, (__force void __user *)v, len, &ts->wr->f_pos);
109104+ ret = vfs_write(ts->wr, (void __force_user *)v, len, &ts->wr->f_pos);
109105 set_fs(oldfs);
109106
109107 if (ret <= 0 && ret != -ERESTARTSYS && ret != -EAGAIN)
109108diff --git a/net/appletalk/atalk_proc.c b/net/appletalk/atalk_proc.c
109109index af46bc4..f9adfcd 100644
109110--- a/net/appletalk/atalk_proc.c
109111+++ b/net/appletalk/atalk_proc.c
109112@@ -256,7 +256,7 @@ int __init atalk_proc_init(void)
109113 struct proc_dir_entry *p;
109114 int rc = -ENOMEM;
109115
109116- atalk_proc_dir = proc_mkdir("atalk", init_net.proc_net);
109117+ atalk_proc_dir = proc_mkdir_restrict("atalk", init_net.proc_net);
109118 if (!atalk_proc_dir)
109119 goto out;
109120
109121diff --git a/net/atm/atm_misc.c b/net/atm/atm_misc.c
109122index 876fbe8..8bbea9f 100644
109123--- a/net/atm/atm_misc.c
109124+++ b/net/atm/atm_misc.c
109125@@ -17,7 +17,7 @@ int atm_charge(struct atm_vcc *vcc, int truesize)
109126 if (atomic_read(&sk_atm(vcc)->sk_rmem_alloc) <= sk_atm(vcc)->sk_rcvbuf)
109127 return 1;
109128 atm_return(vcc, truesize);
109129- atomic_inc(&vcc->stats->rx_drop);
109130+ atomic_inc_unchecked(&vcc->stats->rx_drop);
109131 return 0;
109132 }
109133 EXPORT_SYMBOL(atm_charge);
109134@@ -39,7 +39,7 @@ struct sk_buff *atm_alloc_charge(struct atm_vcc *vcc, int pdu_size,
109135 }
109136 }
109137 atm_return(vcc, guess);
109138- atomic_inc(&vcc->stats->rx_drop);
109139+ atomic_inc_unchecked(&vcc->stats->rx_drop);
109140 return NULL;
109141 }
109142 EXPORT_SYMBOL(atm_alloc_charge);
109143@@ -86,7 +86,7 @@ EXPORT_SYMBOL(atm_pcr_goal);
109144
109145 void sonet_copy_stats(struct k_sonet_stats *from, struct sonet_stats *to)
109146 {
109147-#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
109148+#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
109149 __SONET_ITEMS
109150 #undef __HANDLE_ITEM
109151 }
109152@@ -94,7 +94,7 @@ EXPORT_SYMBOL(sonet_copy_stats);
109153
109154 void sonet_subtract_stats(struct k_sonet_stats *from, struct sonet_stats *to)
109155 {
109156-#define __HANDLE_ITEM(i) atomic_sub(to->i, &from->i)
109157+#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i,&from->i)
109158 __SONET_ITEMS
109159 #undef __HANDLE_ITEM
109160 }
109161diff --git a/net/atm/lec.c b/net/atm/lec.c
109162index 4b98f89..5a2f6cb 100644
109163--- a/net/atm/lec.c
109164+++ b/net/atm/lec.c
109165@@ -111,9 +111,9 @@ static inline void lec_arp_put(struct lec_arp_table *entry)
109166 }
109167
109168 static struct lane2_ops lane2_ops = {
109169- lane2_resolve, /* resolve, spec 3.1.3 */
109170- lane2_associate_req, /* associate_req, spec 3.1.4 */
109171- NULL /* associate indicator, spec 3.1.5 */
109172+ .resolve = lane2_resolve,
109173+ .associate_req = lane2_associate_req,
109174+ .associate_indicator = NULL
109175 };
109176
109177 static unsigned char bus_mac[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
109178diff --git a/net/atm/lec.h b/net/atm/lec.h
109179index 4149db1..f2ab682 100644
109180--- a/net/atm/lec.h
109181+++ b/net/atm/lec.h
109182@@ -48,7 +48,7 @@ struct lane2_ops {
109183 const u8 *tlvs, u32 sizeoftlvs);
109184 void (*associate_indicator) (struct net_device *dev, const u8 *mac_addr,
109185 const u8 *tlvs, u32 sizeoftlvs);
109186-};
109187+} __no_const;
109188
109189 /*
109190 * ATM LAN Emulation supports both LLC & Dix Ethernet EtherType
109191diff --git a/net/atm/mpoa_caches.c b/net/atm/mpoa_caches.c
109192index d1b2d9a..d549f7f 100644
109193--- a/net/atm/mpoa_caches.c
109194+++ b/net/atm/mpoa_caches.c
109195@@ -535,30 +535,30 @@ static void eg_destroy_cache(struct mpoa_client *mpc)
109196
109197
109198 static struct in_cache_ops ingress_ops = {
109199- in_cache_add_entry, /* add_entry */
109200- in_cache_get, /* get */
109201- in_cache_get_with_mask, /* get_with_mask */
109202- in_cache_get_by_vcc, /* get_by_vcc */
109203- in_cache_put, /* put */
109204- in_cache_remove_entry, /* remove_entry */
109205- cache_hit, /* cache_hit */
109206- clear_count_and_expired, /* clear_count */
109207- check_resolving_entries, /* check_resolving */
109208- refresh_entries, /* refresh */
109209- in_destroy_cache /* destroy_cache */
109210+ .add_entry = in_cache_add_entry,
109211+ .get = in_cache_get,
109212+ .get_with_mask = in_cache_get_with_mask,
109213+ .get_by_vcc = in_cache_get_by_vcc,
109214+ .put = in_cache_put,
109215+ .remove_entry = in_cache_remove_entry,
109216+ .cache_hit = cache_hit,
109217+ .clear_count = clear_count_and_expired,
109218+ .check_resolving = check_resolving_entries,
109219+ .refresh = refresh_entries,
109220+ .destroy_cache = in_destroy_cache
109221 };
109222
109223 static struct eg_cache_ops egress_ops = {
109224- eg_cache_add_entry, /* add_entry */
109225- eg_cache_get_by_cache_id, /* get_by_cache_id */
109226- eg_cache_get_by_tag, /* get_by_tag */
109227- eg_cache_get_by_vcc, /* get_by_vcc */
109228- eg_cache_get_by_src_ip, /* get_by_src_ip */
109229- eg_cache_put, /* put */
109230- eg_cache_remove_entry, /* remove_entry */
109231- update_eg_cache_entry, /* update */
109232- clear_expired, /* clear_expired */
109233- eg_destroy_cache /* destroy_cache */
109234+ .add_entry = eg_cache_add_entry,
109235+ .get_by_cache_id = eg_cache_get_by_cache_id,
109236+ .get_by_tag = eg_cache_get_by_tag,
109237+ .get_by_vcc = eg_cache_get_by_vcc,
109238+ .get_by_src_ip = eg_cache_get_by_src_ip,
109239+ .put = eg_cache_put,
109240+ .remove_entry = eg_cache_remove_entry,
109241+ .update = update_eg_cache_entry,
109242+ .clear_expired = clear_expired,
109243+ .destroy_cache = eg_destroy_cache
109244 };
109245
109246
109247diff --git a/net/atm/proc.c b/net/atm/proc.c
109248index bbb6461..cf04016 100644
109249--- a/net/atm/proc.c
109250+++ b/net/atm/proc.c
109251@@ -45,9 +45,9 @@ static void add_stats(struct seq_file *seq, const char *aal,
109252 const struct k_atm_aal_stats *stats)
109253 {
109254 seq_printf(seq, "%s ( %d %d %d %d %d )", aal,
109255- atomic_read(&stats->tx), atomic_read(&stats->tx_err),
109256- atomic_read(&stats->rx), atomic_read(&stats->rx_err),
109257- atomic_read(&stats->rx_drop));
109258+ atomic_read_unchecked(&stats->tx),atomic_read_unchecked(&stats->tx_err),
109259+ atomic_read_unchecked(&stats->rx),atomic_read_unchecked(&stats->rx_err),
109260+ atomic_read_unchecked(&stats->rx_drop));
109261 }
109262
109263 static void atm_dev_info(struct seq_file *seq, const struct atm_dev *dev)
109264diff --git a/net/atm/resources.c b/net/atm/resources.c
109265index 0447d5d..3cf4728 100644
109266--- a/net/atm/resources.c
109267+++ b/net/atm/resources.c
109268@@ -160,7 +160,7 @@ EXPORT_SYMBOL(atm_dev_deregister);
109269 static void copy_aal_stats(struct k_atm_aal_stats *from,
109270 struct atm_aal_stats *to)
109271 {
109272-#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
109273+#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
109274 __AAL_STAT_ITEMS
109275 #undef __HANDLE_ITEM
109276 }
109277@@ -168,7 +168,7 @@ static void copy_aal_stats(struct k_atm_aal_stats *from,
109278 static void subtract_aal_stats(struct k_atm_aal_stats *from,
109279 struct atm_aal_stats *to)
109280 {
109281-#define __HANDLE_ITEM(i) atomic_sub(to->i, &from->i)
109282+#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i, &from->i)
109283 __AAL_STAT_ITEMS
109284 #undef __HANDLE_ITEM
109285 }
109286diff --git a/net/ax25/sysctl_net_ax25.c b/net/ax25/sysctl_net_ax25.c
109287index 919a5ce..cc6b444 100644
109288--- a/net/ax25/sysctl_net_ax25.c
109289+++ b/net/ax25/sysctl_net_ax25.c
109290@@ -152,7 +152,7 @@ int ax25_register_dev_sysctl(ax25_dev *ax25_dev)
109291 {
109292 char path[sizeof("net/ax25/") + IFNAMSIZ];
109293 int k;
109294- struct ctl_table *table;
109295+ ctl_table_no_const *table;
109296
109297 table = kmemdup(ax25_param_table, sizeof(ax25_param_table), GFP_KERNEL);
109298 if (!table)
109299diff --git a/net/batman-adv/bat_iv_ogm.c b/net/batman-adv/bat_iv_ogm.c
109300index 00e00e0..710fcd2 100644
109301--- a/net/batman-adv/bat_iv_ogm.c
109302+++ b/net/batman-adv/bat_iv_ogm.c
109303@@ -312,7 +312,7 @@ static int batadv_iv_ogm_iface_enable(struct batadv_hard_iface *hard_iface)
109304
109305 /* randomize initial seqno to avoid collision */
109306 get_random_bytes(&random_seqno, sizeof(random_seqno));
109307- atomic_set(&hard_iface->bat_iv.ogm_seqno, random_seqno);
109308+ atomic_set_unchecked(&hard_iface->bat_iv.ogm_seqno, random_seqno);
109309
109310 hard_iface->bat_iv.ogm_buff_len = BATADV_OGM_HLEN;
109311 ogm_buff = kmalloc(hard_iface->bat_iv.ogm_buff_len, GFP_ATOMIC);
109312@@ -917,9 +917,9 @@ static void batadv_iv_ogm_schedule(struct batadv_hard_iface *hard_iface)
109313 batadv_ogm_packet->tvlv_len = htons(tvlv_len);
109314
109315 /* change sequence number to network order */
109316- seqno = (uint32_t)atomic_read(&hard_iface->bat_iv.ogm_seqno);
109317+ seqno = (uint32_t)atomic_read_unchecked(&hard_iface->bat_iv.ogm_seqno);
109318 batadv_ogm_packet->seqno = htonl(seqno);
109319- atomic_inc(&hard_iface->bat_iv.ogm_seqno);
109320+ atomic_inc_unchecked(&hard_iface->bat_iv.ogm_seqno);
109321
109322 batadv_iv_ogm_slide_own_bcast_window(hard_iface);
109323
109324@@ -1596,7 +1596,7 @@ static void batadv_iv_ogm_process(const struct sk_buff *skb, int ogm_offset,
109325 return;
109326
109327 /* could be changed by schedule_own_packet() */
109328- if_incoming_seqno = atomic_read(&if_incoming->bat_iv.ogm_seqno);
109329+ if_incoming_seqno = atomic_read_unchecked(&if_incoming->bat_iv.ogm_seqno);
109330
109331 if (ogm_packet->flags & BATADV_DIRECTLINK)
109332 has_directlink_flag = true;
109333diff --git a/net/batman-adv/fragmentation.c b/net/batman-adv/fragmentation.c
109334index 3d1dcaa..4699f4e 100644
109335--- a/net/batman-adv/fragmentation.c
109336+++ b/net/batman-adv/fragmentation.c
109337@@ -449,7 +449,7 @@ bool batadv_frag_send_packet(struct sk_buff *skb,
109338 frag_header.packet_type = BATADV_UNICAST_FRAG;
109339 frag_header.version = BATADV_COMPAT_VERSION;
109340 frag_header.ttl = BATADV_TTL;
109341- frag_header.seqno = htons(atomic_inc_return(&bat_priv->frag_seqno));
109342+ frag_header.seqno = htons(atomic_inc_return_unchecked(&bat_priv->frag_seqno));
109343 frag_header.reserved = 0;
109344 frag_header.no = 0;
109345 frag_header.total_size = htons(skb->len);
109346diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c
109347index 5ec31d7..e371631 100644
109348--- a/net/batman-adv/soft-interface.c
109349+++ b/net/batman-adv/soft-interface.c
109350@@ -295,7 +295,7 @@ send:
109351 primary_if->net_dev->dev_addr);
109352
109353 /* set broadcast sequence number */
109354- seqno = atomic_inc_return(&bat_priv->bcast_seqno);
109355+ seqno = atomic_inc_return_unchecked(&bat_priv->bcast_seqno);
109356 bcast_packet->seqno = htonl(seqno);
109357
109358 batadv_add_bcast_packet_to_list(bat_priv, skb, brd_delay);
109359@@ -760,7 +760,7 @@ static int batadv_softif_init_late(struct net_device *dev)
109360 atomic_set(&bat_priv->batman_queue_left, BATADV_BATMAN_QUEUE_LEN);
109361
109362 atomic_set(&bat_priv->mesh_state, BATADV_MESH_INACTIVE);
109363- atomic_set(&bat_priv->bcast_seqno, 1);
109364+ atomic_set_unchecked(&bat_priv->bcast_seqno, 1);
109365 atomic_set(&bat_priv->tt.vn, 0);
109366 atomic_set(&bat_priv->tt.local_changes, 0);
109367 atomic_set(&bat_priv->tt.ogm_append_cnt, 0);
109368@@ -774,7 +774,7 @@ static int batadv_softif_init_late(struct net_device *dev)
109369
109370 /* randomize initial seqno to avoid collision */
109371 get_random_bytes(&random_seqno, sizeof(random_seqno));
109372- atomic_set(&bat_priv->frag_seqno, random_seqno);
109373+ atomic_set_unchecked(&bat_priv->frag_seqno, random_seqno);
109374
109375 bat_priv->primary_if = NULL;
109376 bat_priv->num_ifaces = 0;
109377@@ -982,7 +982,7 @@ int batadv_softif_is_valid(const struct net_device *net_dev)
109378 return 0;
109379 }
109380
109381-struct rtnl_link_ops batadv_link_ops __read_mostly = {
109382+struct rtnl_link_ops batadv_link_ops = {
109383 .kind = "batadv",
109384 .priv_size = sizeof(struct batadv_priv),
109385 .setup = batadv_softif_init_early,
109386diff --git a/net/batman-adv/types.h b/net/batman-adv/types.h
109387index 9398c3f..0e79657 100644
109388--- a/net/batman-adv/types.h
109389+++ b/net/batman-adv/types.h
109390@@ -67,7 +67,7 @@ enum batadv_dhcp_recipient {
109391 struct batadv_hard_iface_bat_iv {
109392 unsigned char *ogm_buff;
109393 int ogm_buff_len;
109394- atomic_t ogm_seqno;
109395+ atomic_unchecked_t ogm_seqno;
109396 };
109397
109398 /**
109399@@ -766,7 +766,7 @@ struct batadv_priv {
109400 atomic_t bonding;
109401 atomic_t fragmentation;
109402 atomic_t packet_size_max;
109403- atomic_t frag_seqno;
109404+ atomic_unchecked_t frag_seqno;
109405 #ifdef CONFIG_BATMAN_ADV_BLA
109406 atomic_t bridge_loop_avoidance;
109407 #endif
109408@@ -785,7 +785,7 @@ struct batadv_priv {
109409 #endif
109410 uint32_t isolation_mark;
109411 uint32_t isolation_mark_mask;
109412- atomic_t bcast_seqno;
109413+ atomic_unchecked_t bcast_seqno;
109414 atomic_t bcast_queue_left;
109415 atomic_t batman_queue_left;
109416 char num_ifaces;
109417diff --git a/net/bluetooth/hci_sock.c b/net/bluetooth/hci_sock.c
109418index 1d65c5b..43e55fd 100644
109419--- a/net/bluetooth/hci_sock.c
109420+++ b/net/bluetooth/hci_sock.c
109421@@ -1042,7 +1042,7 @@ static int hci_sock_setsockopt(struct socket *sock, int level, int optname,
109422 uf.event_mask[1] = *((u32 *) f->event_mask + 1);
109423 }
109424
109425- len = min_t(unsigned int, len, sizeof(uf));
109426+ len = min((size_t)len, sizeof(uf));
109427 if (copy_from_user(&uf, optval, len)) {
109428 err = -EFAULT;
109429 break;
109430diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
109431index 6ba33f9..4afc26f 100644
109432--- a/net/bluetooth/l2cap_core.c
109433+++ b/net/bluetooth/l2cap_core.c
109434@@ -3534,8 +3534,10 @@ static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len,
109435 break;
109436
109437 case L2CAP_CONF_RFC:
109438- if (olen == sizeof(rfc))
109439- memcpy(&rfc, (void *)val, olen);
109440+ if (olen != sizeof(rfc))
109441+ break;
109442+
109443+ memcpy(&rfc, (void *)val, olen);
109444
109445 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
109446 rfc.mode != chan->mode)
109447diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c
109448index 60694f0..32623ed 100644
109449--- a/net/bluetooth/l2cap_sock.c
109450+++ b/net/bluetooth/l2cap_sock.c
109451@@ -633,7 +633,8 @@ static int l2cap_sock_setsockopt_old(struct socket *sock, int optname,
109452 struct sock *sk = sock->sk;
109453 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
109454 struct l2cap_options opts;
109455- int len, err = 0;
109456+ int err = 0;
109457+ size_t len = optlen;
109458 u32 opt;
109459
109460 BT_DBG("sk %p", sk);
109461@@ -660,7 +661,7 @@ static int l2cap_sock_setsockopt_old(struct socket *sock, int optname,
109462 opts.max_tx = chan->max_tx;
109463 opts.txwin_size = chan->tx_win;
109464
109465- len = min_t(unsigned int, sizeof(opts), optlen);
109466+ len = min(sizeof(opts), len);
109467 if (copy_from_user((char *) &opts, optval, len)) {
109468 err = -EFAULT;
109469 break;
109470@@ -747,7 +748,8 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname,
109471 struct bt_security sec;
109472 struct bt_power pwr;
109473 struct l2cap_conn *conn;
109474- int len, err = 0;
109475+ int err = 0;
109476+ size_t len = optlen;
109477 u32 opt;
109478
109479 BT_DBG("sk %p", sk);
109480@@ -771,7 +773,7 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname,
109481
109482 sec.level = BT_SECURITY_LOW;
109483
109484- len = min_t(unsigned int, sizeof(sec), optlen);
109485+ len = min(sizeof(sec), len);
109486 if (copy_from_user((char *) &sec, optval, len)) {
109487 err = -EFAULT;
109488 break;
109489@@ -867,7 +869,7 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname,
109490
109491 pwr.force_active = BT_POWER_FORCE_ACTIVE_ON;
109492
109493- len = min_t(unsigned int, sizeof(pwr), optlen);
109494+ len = min(sizeof(pwr), len);
109495 if (copy_from_user((char *) &pwr, optval, len)) {
109496 err = -EFAULT;
109497 break;
109498diff --git a/net/bluetooth/rfcomm/sock.c b/net/bluetooth/rfcomm/sock.c
109499index 3c6d2c8..6afc970 100644
109500--- a/net/bluetooth/rfcomm/sock.c
109501+++ b/net/bluetooth/rfcomm/sock.c
109502@@ -686,7 +686,7 @@ static int rfcomm_sock_setsockopt(struct socket *sock, int level, int optname, c
109503 struct sock *sk = sock->sk;
109504 struct bt_security sec;
109505 int err = 0;
109506- size_t len;
109507+ size_t len = optlen;
109508 u32 opt;
109509
109510 BT_DBG("sk %p", sk);
109511@@ -708,7 +708,7 @@ static int rfcomm_sock_setsockopt(struct socket *sock, int level, int optname, c
109512
109513 sec.level = BT_SECURITY_LOW;
109514
109515- len = min_t(unsigned int, sizeof(sec), optlen);
109516+ len = min(sizeof(sec), len);
109517 if (copy_from_user((char *) &sec, optval, len)) {
109518 err = -EFAULT;
109519 break;
109520diff --git a/net/bluetooth/rfcomm/tty.c b/net/bluetooth/rfcomm/tty.c
109521index 8e385a0..a5bdd8e 100644
109522--- a/net/bluetooth/rfcomm/tty.c
109523+++ b/net/bluetooth/rfcomm/tty.c
109524@@ -752,7 +752,7 @@ static int rfcomm_tty_open(struct tty_struct *tty, struct file *filp)
109525 BT_DBG("tty %p id %d", tty, tty->index);
109526
109527 BT_DBG("dev %p dst %pMR channel %d opened %d", dev, &dev->dst,
109528- dev->channel, dev->port.count);
109529+ dev->channel, atomic_read(&dev->port.count));
109530
109531 err = tty_port_open(&dev->port, tty, filp);
109532 if (err)
109533@@ -775,7 +775,7 @@ static void rfcomm_tty_close(struct tty_struct *tty, struct file *filp)
109534 struct rfcomm_dev *dev = (struct rfcomm_dev *) tty->driver_data;
109535
109536 BT_DBG("tty %p dev %p dlc %p opened %d", tty, dev, dev->dlc,
109537- dev->port.count);
109538+ atomic_read(&dev->port.count));
109539
109540 tty_port_close(&dev->port, tty, filp);
109541 }
109542diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c
109543index 4fbcea0..69a6786 100644
109544--- a/net/bridge/br_netlink.c
109545+++ b/net/bridge/br_netlink.c
109546@@ -726,7 +726,7 @@ static struct rtnl_af_ops br_af_ops __read_mostly = {
109547 .get_link_af_size = br_get_link_af_size,
109548 };
109549
109550-struct rtnl_link_ops br_link_ops __read_mostly = {
109551+struct rtnl_link_ops br_link_ops = {
109552 .kind = "bridge",
109553 .priv_size = sizeof(struct net_bridge),
109554 .setup = br_dev_setup,
109555diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
109556index 91180a7..1301daa 100644
109557--- a/net/bridge/netfilter/ebtables.c
109558+++ b/net/bridge/netfilter/ebtables.c
109559@@ -1533,7 +1533,7 @@ static int do_ebt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
109560 tmp.valid_hooks = t->table->valid_hooks;
109561 }
109562 mutex_unlock(&ebt_mutex);
109563- if (copy_to_user(user, &tmp, *len) != 0) {
109564+ if (*len > sizeof(tmp) || copy_to_user(user, &tmp, *len) != 0) {
109565 BUGPRINT("c2u Didn't work\n");
109566 ret = -EFAULT;
109567 break;
109568@@ -2339,7 +2339,7 @@ static int compat_do_ebt_get_ctl(struct sock *sk, int cmd,
109569 goto out;
109570 tmp.valid_hooks = t->valid_hooks;
109571
109572- if (copy_to_user(user, &tmp, *len) != 0) {
109573+ if (*len > sizeof(tmp) || copy_to_user(user, &tmp, *len) != 0) {
109574 ret = -EFAULT;
109575 break;
109576 }
109577@@ -2350,7 +2350,7 @@ static int compat_do_ebt_get_ctl(struct sock *sk, int cmd,
109578 tmp.entries_size = t->table->entries_size;
109579 tmp.valid_hooks = t->table->valid_hooks;
109580
109581- if (copy_to_user(user, &tmp, *len) != 0) {
109582+ if (*len > sizeof(tmp) || copy_to_user(user, &tmp, *len) != 0) {
109583 ret = -EFAULT;
109584 break;
109585 }
109586diff --git a/net/caif/cfctrl.c b/net/caif/cfctrl.c
109587index f5afda1..dcf770a 100644
109588--- a/net/caif/cfctrl.c
109589+++ b/net/caif/cfctrl.c
109590@@ -10,6 +10,7 @@
109591 #include <linux/spinlock.h>
109592 #include <linux/slab.h>
109593 #include <linux/pkt_sched.h>
109594+#include <linux/sched.h>
109595 #include <net/caif/caif_layer.h>
109596 #include <net/caif/cfpkt.h>
109597 #include <net/caif/cfctrl.h>
109598@@ -43,8 +44,8 @@ struct cflayer *cfctrl_create(void)
109599 memset(&dev_info, 0, sizeof(dev_info));
109600 dev_info.id = 0xff;
109601 cfsrvl_init(&this->serv, 0, &dev_info, false);
109602- atomic_set(&this->req_seq_no, 1);
109603- atomic_set(&this->rsp_seq_no, 1);
109604+ atomic_set_unchecked(&this->req_seq_no, 1);
109605+ atomic_set_unchecked(&this->rsp_seq_no, 1);
109606 this->serv.layer.receive = cfctrl_recv;
109607 sprintf(this->serv.layer.name, "ctrl");
109608 this->serv.layer.ctrlcmd = cfctrl_ctrlcmd;
109609@@ -130,8 +131,8 @@ static void cfctrl_insert_req(struct cfctrl *ctrl,
109610 struct cfctrl_request_info *req)
109611 {
109612 spin_lock_bh(&ctrl->info_list_lock);
109613- atomic_inc(&ctrl->req_seq_no);
109614- req->sequence_no = atomic_read(&ctrl->req_seq_no);
109615+ atomic_inc_unchecked(&ctrl->req_seq_no);
109616+ req->sequence_no = atomic_read_unchecked(&ctrl->req_seq_no);
109617 list_add_tail(&req->list, &ctrl->list);
109618 spin_unlock_bh(&ctrl->info_list_lock);
109619 }
109620@@ -149,7 +150,7 @@ static struct cfctrl_request_info *cfctrl_remove_req(struct cfctrl *ctrl,
109621 if (p != first)
109622 pr_warn("Requests are not received in order\n");
109623
109624- atomic_set(&ctrl->rsp_seq_no,
109625+ atomic_set_unchecked(&ctrl->rsp_seq_no,
109626 p->sequence_no);
109627 list_del(&p->list);
109628 goto out;
109629diff --git a/net/caif/chnl_net.c b/net/caif/chnl_net.c
109630index 67a4a36..8d28068 100644
109631--- a/net/caif/chnl_net.c
109632+++ b/net/caif/chnl_net.c
109633@@ -515,7 +515,7 @@ static const struct nla_policy ipcaif_policy[IFLA_CAIF_MAX + 1] = {
109634 };
109635
109636
109637-static struct rtnl_link_ops ipcaif_link_ops __read_mostly = {
109638+static struct rtnl_link_ops ipcaif_link_ops = {
109639 .kind = "caif",
109640 .priv_size = sizeof(struct chnl_net),
109641 .setup = ipcaif_net_setup,
109642diff --git a/net/can/af_can.c b/net/can/af_can.c
109643index 32d710e..93bcf05 100644
109644--- a/net/can/af_can.c
109645+++ b/net/can/af_can.c
109646@@ -884,7 +884,7 @@ static const struct net_proto_family can_family_ops = {
109647 };
109648
109649 /* notifier block for netdevice event */
109650-static struct notifier_block can_netdev_notifier __read_mostly = {
109651+static struct notifier_block can_netdev_notifier = {
109652 .notifier_call = can_notifier,
109653 };
109654
109655diff --git a/net/can/bcm.c b/net/can/bcm.c
109656index ee9ffd9..dfdf3d4 100644
109657--- a/net/can/bcm.c
109658+++ b/net/can/bcm.c
109659@@ -1619,7 +1619,7 @@ static int __init bcm_module_init(void)
109660 }
109661
109662 /* create /proc/net/can-bcm directory */
109663- proc_dir = proc_mkdir("can-bcm", init_net.proc_net);
109664+ proc_dir = proc_mkdir_restrict("can-bcm", init_net.proc_net);
109665 return 0;
109666 }
109667
109668diff --git a/net/can/gw.c b/net/can/gw.c
109669index a6f448e..5902171 100644
109670--- a/net/can/gw.c
109671+++ b/net/can/gw.c
109672@@ -80,7 +80,6 @@ MODULE_PARM_DESC(max_hops,
109673 "default: " __stringify(CGW_DEFAULT_HOPS) ")");
109674
109675 static HLIST_HEAD(cgw_list);
109676-static struct notifier_block notifier;
109677
109678 static struct kmem_cache *cgw_cache __read_mostly;
109679
109680@@ -948,6 +947,10 @@ static int cgw_remove_job(struct sk_buff *skb, struct nlmsghdr *nlh)
109681 return err;
109682 }
109683
109684+static struct notifier_block notifier = {
109685+ .notifier_call = cgw_notifier
109686+};
109687+
109688 static __init int cgw_module_init(void)
109689 {
109690 /* sanitize given module parameter */
109691@@ -963,7 +966,6 @@ static __init int cgw_module_init(void)
109692 return -ENOMEM;
109693
109694 /* set notifier */
109695- notifier.notifier_call = cgw_notifier;
109696 register_netdevice_notifier(&notifier);
109697
109698 if (__rtnl_register(PF_CAN, RTM_GETROUTE, NULL, cgw_dump_jobs, NULL)) {
109699diff --git a/net/can/proc.c b/net/can/proc.c
109700index 1a19b98..df2b4ec 100644
109701--- a/net/can/proc.c
109702+++ b/net/can/proc.c
109703@@ -514,7 +514,7 @@ static void can_remove_proc_readentry(const char *name)
109704 void can_init_proc(void)
109705 {
109706 /* create /proc/net/can directory */
109707- can_dir = proc_mkdir("can", init_net.proc_net);
109708+ can_dir = proc_mkdir_restrict("can", init_net.proc_net);
109709
109710 if (!can_dir) {
109711 printk(KERN_INFO "can: failed to create /proc/net/can . "
109712diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c
109713index a9f4ae4..ee19b92 100644
109714--- a/net/ceph/messenger.c
109715+++ b/net/ceph/messenger.c
109716@@ -188,7 +188,7 @@ static void con_fault(struct ceph_connection *con);
109717 #define MAX_ADDR_STR_LEN 64 /* 54 is enough */
109718
109719 static char addr_str[ADDR_STR_COUNT][MAX_ADDR_STR_LEN];
109720-static atomic_t addr_str_seq = ATOMIC_INIT(0);
109721+static atomic_unchecked_t addr_str_seq = ATOMIC_INIT(0);
109722
109723 static struct page *zero_page; /* used in certain error cases */
109724
109725@@ -199,7 +199,7 @@ const char *ceph_pr_addr(const struct sockaddr_storage *ss)
109726 struct sockaddr_in *in4 = (struct sockaddr_in *) ss;
109727 struct sockaddr_in6 *in6 = (struct sockaddr_in6 *) ss;
109728
109729- i = atomic_inc_return(&addr_str_seq) & ADDR_STR_COUNT_MASK;
109730+ i = atomic_inc_return_unchecked(&addr_str_seq) & ADDR_STR_COUNT_MASK;
109731 s = addr_str[i];
109732
109733 switch (ss->ss_family) {
109734diff --git a/net/compat.c b/net/compat.c
109735index f7bd286..76ea56a 100644
109736--- a/net/compat.c
109737+++ b/net/compat.c
109738@@ -100,20 +100,20 @@ ssize_t get_compat_msghdr(struct msghdr *kmsg,
109739
109740 #define CMSG_COMPAT_FIRSTHDR(msg) \
109741 (((msg)->msg_controllen) >= sizeof(struct compat_cmsghdr) ? \
109742- (struct compat_cmsghdr __user *)((msg)->msg_control) : \
109743+ (struct compat_cmsghdr __force_user *)((msg)->msg_control) : \
109744 (struct compat_cmsghdr __user *)NULL)
109745
109746 #define CMSG_COMPAT_OK(ucmlen, ucmsg, mhdr) \
109747 ((ucmlen) >= sizeof(struct compat_cmsghdr) && \
109748 (ucmlen) <= (unsigned long) \
109749 ((mhdr)->msg_controllen - \
109750- ((char *)(ucmsg) - (char *)(mhdr)->msg_control)))
109751+ ((char __force_kernel *)(ucmsg) - (char *)(mhdr)->msg_control)))
109752
109753 static inline struct compat_cmsghdr __user *cmsg_compat_nxthdr(struct msghdr *msg,
109754 struct compat_cmsghdr __user *cmsg, int cmsg_len)
109755 {
109756 char __user *ptr = (char __user *)cmsg + CMSG_COMPAT_ALIGN(cmsg_len);
109757- if ((unsigned long)(ptr + 1 - (char __user *)msg->msg_control) >
109758+ if ((unsigned long)(ptr + 1 - (char __force_user *)msg->msg_control) >
109759 msg->msg_controllen)
109760 return NULL;
109761 return (struct compat_cmsghdr __user *)ptr;
109762@@ -203,7 +203,7 @@ Efault:
109763
109764 int put_cmsg_compat(struct msghdr *kmsg, int level, int type, int len, void *data)
109765 {
109766- struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __user *) kmsg->msg_control;
109767+ struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __force_user *) kmsg->msg_control;
109768 struct compat_cmsghdr cmhdr;
109769 struct compat_timeval ctv;
109770 struct compat_timespec cts[3];
109771@@ -259,7 +259,7 @@ int put_cmsg_compat(struct msghdr *kmsg, int level, int type, int len, void *dat
109772
109773 void scm_detach_fds_compat(struct msghdr *kmsg, struct scm_cookie *scm)
109774 {
109775- struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __user *) kmsg->msg_control;
109776+ struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __force_user *) kmsg->msg_control;
109777 int fdmax = (kmsg->msg_controllen - sizeof(struct compat_cmsghdr)) / sizeof(int);
109778 int fdnum = scm->fp->count;
109779 struct file **fp = scm->fp->fp;
109780@@ -347,7 +347,7 @@ static int do_set_sock_timeout(struct socket *sock, int level,
109781 return -EFAULT;
109782 old_fs = get_fs();
109783 set_fs(KERNEL_DS);
109784- err = sock_setsockopt(sock, level, optname, (char *)&ktime, sizeof(ktime));
109785+ err = sock_setsockopt(sock, level, optname, (char __force_user *)&ktime, sizeof(ktime));
109786 set_fs(old_fs);
109787
109788 return err;
109789@@ -408,7 +408,7 @@ static int do_get_sock_timeout(struct socket *sock, int level, int optname,
109790 len = sizeof(ktime);
109791 old_fs = get_fs();
109792 set_fs(KERNEL_DS);
109793- err = sock_getsockopt(sock, level, optname, (char *) &ktime, &len);
109794+ err = sock_getsockopt(sock, level, optname, (char __force_user *) &ktime, (int __force_user *)&len);
109795 set_fs(old_fs);
109796
109797 if (!err) {
109798@@ -551,7 +551,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
109799 case MCAST_JOIN_GROUP:
109800 case MCAST_LEAVE_GROUP:
109801 {
109802- struct compat_group_req __user *gr32 = (void *)optval;
109803+ struct compat_group_req __user *gr32 = (void __user *)optval;
109804 struct group_req __user *kgr =
109805 compat_alloc_user_space(sizeof(struct group_req));
109806 u32 interface;
109807@@ -572,7 +572,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
109808 case MCAST_BLOCK_SOURCE:
109809 case MCAST_UNBLOCK_SOURCE:
109810 {
109811- struct compat_group_source_req __user *gsr32 = (void *)optval;
109812+ struct compat_group_source_req __user *gsr32 = (void __user *)optval;
109813 struct group_source_req __user *kgsr = compat_alloc_user_space(
109814 sizeof(struct group_source_req));
109815 u32 interface;
109816@@ -593,7 +593,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
109817 }
109818 case MCAST_MSFILTER:
109819 {
109820- struct compat_group_filter __user *gf32 = (void *)optval;
109821+ struct compat_group_filter __user *gf32 = (void __user *)optval;
109822 struct group_filter __user *kgf;
109823 u32 interface, fmode, numsrc;
109824
109825@@ -631,7 +631,7 @@ int compat_mc_getsockopt(struct sock *sock, int level, int optname,
109826 char __user *optval, int __user *optlen,
109827 int (*getsockopt)(struct sock *, int, int, char __user *, int __user *))
109828 {
109829- struct compat_group_filter __user *gf32 = (void *)optval;
109830+ struct compat_group_filter __user *gf32 = (void __user *)optval;
109831 struct group_filter __user *kgf;
109832 int __user *koptlen;
109833 u32 interface, fmode, numsrc;
109834@@ -775,7 +775,7 @@ COMPAT_SYSCALL_DEFINE2(socketcall, int, call, u32 __user *, args)
109835
109836 if (call < SYS_SOCKET || call > SYS_SENDMMSG)
109837 return -EINVAL;
109838- if (copy_from_user(a, args, nas[call]))
109839+ if (nas[call] > sizeof a || copy_from_user(a, args, nas[call]))
109840 return -EFAULT;
109841 a0 = a[0];
109842 a1 = a[1];
109843diff --git a/net/core/datagram.c b/net/core/datagram.c
109844index df493d6..1145766 100644
109845--- a/net/core/datagram.c
109846+++ b/net/core/datagram.c
109847@@ -302,7 +302,7 @@ int skb_kill_datagram(struct sock *sk, struct sk_buff *skb, unsigned int flags)
109848 }
109849
109850 kfree_skb(skb);
109851- atomic_inc(&sk->sk_drops);
109852+ atomic_inc_unchecked(&sk->sk_drops);
109853 sk_mem_reclaim_partial(sk);
109854
109855 return err;
109856diff --git a/net/core/dev.c b/net/core/dev.c
109857index 22a53ac..1d19af7 100644
109858--- a/net/core/dev.c
109859+++ b/net/core/dev.c
109860@@ -1681,14 +1681,14 @@ int __dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
109861 {
109862 if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) {
109863 if (skb_copy_ubufs(skb, GFP_ATOMIC)) {
109864- atomic_long_inc(&dev->rx_dropped);
109865+ atomic_long_inc_unchecked(&dev->rx_dropped);
109866 kfree_skb(skb);
109867 return NET_RX_DROP;
109868 }
109869 }
109870
109871 if (unlikely(!is_skb_forwardable(dev, skb))) {
109872- atomic_long_inc(&dev->rx_dropped);
109873+ atomic_long_inc_unchecked(&dev->rx_dropped);
109874 kfree_skb(skb);
109875 return NET_RX_DROP;
109876 }
109877@@ -2987,7 +2987,7 @@ recursion_alert:
109878 drop:
109879 rcu_read_unlock_bh();
109880
109881- atomic_long_inc(&dev->tx_dropped);
109882+ atomic_long_inc_unchecked(&dev->tx_dropped);
109883 kfree_skb_list(skb);
109884 return rc;
109885 out:
109886@@ -3336,7 +3336,7 @@ enqueue:
109887
109888 local_irq_restore(flags);
109889
109890- atomic_long_inc(&skb->dev->rx_dropped);
109891+ atomic_long_inc_unchecked(&skb->dev->rx_dropped);
109892 kfree_skb(skb);
109893 return NET_RX_DROP;
109894 }
109895@@ -3413,7 +3413,7 @@ int netif_rx_ni(struct sk_buff *skb)
109896 }
109897 EXPORT_SYMBOL(netif_rx_ni);
109898
109899-static void net_tx_action(struct softirq_action *h)
109900+static __latent_entropy void net_tx_action(void)
109901 {
109902 struct softnet_data *sd = this_cpu_ptr(&softnet_data);
109903
109904@@ -3751,7 +3751,7 @@ ncls:
109905 ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
109906 } else {
109907 drop:
109908- atomic_long_inc(&skb->dev->rx_dropped);
109909+ atomic_long_inc_unchecked(&skb->dev->rx_dropped);
109910 kfree_skb(skb);
109911 /* Jamal, now you will not able to escape explaining
109912 * me how you were going to use this. :-)
109913@@ -4640,7 +4640,7 @@ out_unlock:
109914 return work;
109915 }
109916
109917-static void net_rx_action(struct softirq_action *h)
109918+static __latent_entropy void net_rx_action(void)
109919 {
109920 struct softnet_data *sd = this_cpu_ptr(&softnet_data);
109921 unsigned long time_limit = jiffies + 2;
109922@@ -6676,8 +6676,8 @@ struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
109923 } else {
109924 netdev_stats_to_stats64(storage, &dev->stats);
109925 }
109926- storage->rx_dropped += atomic_long_read(&dev->rx_dropped);
109927- storage->tx_dropped += atomic_long_read(&dev->tx_dropped);
109928+ storage->rx_dropped += atomic_long_read_unchecked(&dev->rx_dropped);
109929+ storage->tx_dropped += atomic_long_read_unchecked(&dev->tx_dropped);
109930 return storage;
109931 }
109932 EXPORT_SYMBOL(dev_get_stats);
109933diff --git a/net/core/dev_ioctl.c b/net/core/dev_ioctl.c
109934index b94b1d2..da3ed7c 100644
109935--- a/net/core/dev_ioctl.c
109936+++ b/net/core/dev_ioctl.c
109937@@ -368,8 +368,13 @@ void dev_load(struct net *net, const char *name)
109938 no_module = !dev;
109939 if (no_module && capable(CAP_NET_ADMIN))
109940 no_module = request_module("netdev-%s", name);
109941- if (no_module && capable(CAP_SYS_MODULE))
109942+ if (no_module && capable(CAP_SYS_MODULE)) {
109943+#ifdef CONFIG_GRKERNSEC_MODHARDEN
109944+ ___request_module(true, "grsec_modharden_netdev", "%s", name);
109945+#else
109946 request_module("%s", name);
109947+#endif
109948+ }
109949 }
109950 EXPORT_SYMBOL(dev_load);
109951
109952diff --git a/net/core/filter.c b/net/core/filter.c
109953index f6bdc2b..76eba8e 100644
109954--- a/net/core/filter.c
109955+++ b/net/core/filter.c
109956@@ -533,7 +533,11 @@ do_pass:
109957
109958 /* Unknown instruction. */
109959 default:
109960- goto err;
109961+ WARN(1, KERN_ALERT "Unknown sock filter code:%u jt:%u tf:%u k:%u\n",
109962+ fp->code, fp->jt, fp->jf, fp->k);
109963+ kfree(addrs);
109964+ BUG();
109965+ return -EINVAL;
109966 }
109967
109968 insn++;
109969@@ -577,7 +581,7 @@ static int check_load_and_stores(const struct sock_filter *filter, int flen)
109970 u16 *masks, memvalid = 0; /* One bit per cell, 16 cells */
109971 int pc, ret = 0;
109972
109973- BUILD_BUG_ON(BPF_MEMWORDS > 16);
109974+ BUILD_BUG_ON(BPF_MEMWORDS != 16);
109975
109976 masks = kmalloc_array(flen, sizeof(*masks), GFP_KERNEL);
109977 if (!masks)
109978@@ -992,7 +996,7 @@ int bpf_prog_create(struct bpf_prog **pfp, struct sock_fprog_kern *fprog)
109979 if (!fp)
109980 return -ENOMEM;
109981
109982- memcpy(fp->insns, fprog->filter, fsize);
109983+ memcpy(fp->insns, (void __force_kernel *)fprog->filter, fsize);
109984
109985 fp->len = fprog->len;
109986 /* Since unattached filters are not copied back to user
109987diff --git a/net/core/flow.c b/net/core/flow.c
109988index 1033725..340f65d 100644
109989--- a/net/core/flow.c
109990+++ b/net/core/flow.c
109991@@ -65,7 +65,7 @@ static void flow_cache_new_hashrnd(unsigned long arg)
109992 static int flow_entry_valid(struct flow_cache_entry *fle,
109993 struct netns_xfrm *xfrm)
109994 {
109995- if (atomic_read(&xfrm->flow_cache_genid) != fle->genid)
109996+ if (atomic_read_unchecked(&xfrm->flow_cache_genid) != fle->genid)
109997 return 0;
109998 if (fle->object && !fle->object->ops->check(fle->object))
109999 return 0;
110000@@ -242,7 +242,7 @@ flow_cache_lookup(struct net *net, const struct flowi *key, u16 family, u8 dir,
110001 hlist_add_head(&fle->u.hlist, &fcp->hash_table[hash]);
110002 fcp->hash_count++;
110003 }
110004- } else if (likely(fle->genid == atomic_read(&net->xfrm.flow_cache_genid))) {
110005+ } else if (likely(fle->genid == atomic_read_unchecked(&net->xfrm.flow_cache_genid))) {
110006 flo = fle->object;
110007 if (!flo)
110008 goto ret_object;
110009@@ -263,7 +263,7 @@ nocache:
110010 }
110011 flo = resolver(net, key, family, dir, flo, ctx);
110012 if (fle) {
110013- fle->genid = atomic_read(&net->xfrm.flow_cache_genid);
110014+ fle->genid = atomic_read_unchecked(&net->xfrm.flow_cache_genid);
110015 if (!IS_ERR(flo))
110016 fle->object = flo;
110017 else
110018diff --git a/net/core/neighbour.c b/net/core/neighbour.c
110019index 70fe9e1..926784c 100644
110020--- a/net/core/neighbour.c
110021+++ b/net/core/neighbour.c
110022@@ -2806,7 +2806,7 @@ static int proc_unres_qlen(struct ctl_table *ctl, int write,
110023 void __user *buffer, size_t *lenp, loff_t *ppos)
110024 {
110025 int size, ret;
110026- struct ctl_table tmp = *ctl;
110027+ ctl_table_no_const tmp = *ctl;
110028
110029 tmp.extra1 = &zero;
110030 tmp.extra2 = &unres_qlen_max;
110031@@ -2868,7 +2868,7 @@ static int neigh_proc_dointvec_zero_intmax(struct ctl_table *ctl, int write,
110032 void __user *buffer,
110033 size_t *lenp, loff_t *ppos)
110034 {
110035- struct ctl_table tmp = *ctl;
110036+ ctl_table_no_const tmp = *ctl;
110037 int ret;
110038
110039 tmp.extra1 = &zero;
110040diff --git a/net/core/net-procfs.c b/net/core/net-procfs.c
110041index 2bf8329..2eb1423 100644
110042--- a/net/core/net-procfs.c
110043+++ b/net/core/net-procfs.c
110044@@ -79,7 +79,13 @@ static void dev_seq_printf_stats(struct seq_file *seq, struct net_device *dev)
110045 struct rtnl_link_stats64 temp;
110046 const struct rtnl_link_stats64 *stats = dev_get_stats(dev, &temp);
110047
110048- seq_printf(seq, "%6s: %7llu %7llu %4llu %4llu %4llu %5llu %10llu %9llu "
110049+ if (gr_proc_is_restricted())
110050+ seq_printf(seq, "%6s: %7llu %7llu %4llu %4llu %4llu %5llu %10llu %9llu "
110051+ "%8llu %7llu %4llu %4llu %4llu %5llu %7llu %10llu\n",
110052+ dev->name, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL,
110053+ 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL);
110054+ else
110055+ seq_printf(seq, "%6s: %7llu %7llu %4llu %4llu %4llu %5llu %10llu %9llu "
110056 "%8llu %7llu %4llu %4llu %4llu %5llu %7llu %10llu\n",
110057 dev->name, stats->rx_bytes, stats->rx_packets,
110058 stats->rx_errors,
110059@@ -166,7 +172,7 @@ static int softnet_seq_show(struct seq_file *seq, void *v)
110060 return 0;
110061 }
110062
110063-static const struct seq_operations dev_seq_ops = {
110064+const struct seq_operations dev_seq_ops = {
110065 .start = dev_seq_start,
110066 .next = dev_seq_next,
110067 .stop = dev_seq_stop,
110068@@ -196,7 +202,7 @@ static const struct seq_operations softnet_seq_ops = {
110069
110070 static int softnet_seq_open(struct inode *inode, struct file *file)
110071 {
110072- return seq_open(file, &softnet_seq_ops);
110073+ return seq_open_restrict(file, &softnet_seq_ops);
110074 }
110075
110076 static const struct file_operations softnet_seq_fops = {
110077@@ -283,8 +289,13 @@ static int ptype_seq_show(struct seq_file *seq, void *v)
110078 else
110079 seq_printf(seq, "%04x", ntohs(pt->type));
110080
110081+#ifdef CONFIG_GRKERNSEC_HIDESYM
110082+ seq_printf(seq, " %-8s %pf\n",
110083+ pt->dev ? pt->dev->name : "", NULL);
110084+#else
110085 seq_printf(seq, " %-8s %pf\n",
110086 pt->dev ? pt->dev->name : "", pt->func);
110087+#endif
110088 }
110089
110090 return 0;
110091diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c
110092index f2aa73b..0d1a1ea 100644
110093--- a/net/core/net-sysfs.c
110094+++ b/net/core/net-sysfs.c
110095@@ -279,7 +279,7 @@ static ssize_t carrier_changes_show(struct device *dev,
110096 {
110097 struct net_device *netdev = to_net_dev(dev);
110098 return sprintf(buf, fmt_dec,
110099- atomic_read(&netdev->carrier_changes));
110100+ atomic_read_unchecked(&netdev->carrier_changes));
110101 }
110102 static DEVICE_ATTR_RO(carrier_changes);
110103
110104diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c
110105index 70d3450..eb7c528 100644
110106--- a/net/core/net_namespace.c
110107+++ b/net/core/net_namespace.c
110108@@ -663,7 +663,7 @@ static int __register_pernet_operations(struct list_head *list,
110109 int error;
110110 LIST_HEAD(net_exit_list);
110111
110112- list_add_tail(&ops->list, list);
110113+ pax_list_add_tail((struct list_head *)&ops->list, list);
110114 if (ops->init || (ops->id && ops->size)) {
110115 for_each_net(net) {
110116 error = ops_init(ops, net);
110117@@ -676,7 +676,7 @@ static int __register_pernet_operations(struct list_head *list,
110118
110119 out_undo:
110120 /* If I have an error cleanup all namespaces I initialized */
110121- list_del(&ops->list);
110122+ pax_list_del((struct list_head *)&ops->list);
110123 ops_exit_list(ops, &net_exit_list);
110124 ops_free_list(ops, &net_exit_list);
110125 return error;
110126@@ -687,7 +687,7 @@ static void __unregister_pernet_operations(struct pernet_operations *ops)
110127 struct net *net;
110128 LIST_HEAD(net_exit_list);
110129
110130- list_del(&ops->list);
110131+ pax_list_del((struct list_head *)&ops->list);
110132 for_each_net(net)
110133 list_add_tail(&net->exit_list, &net_exit_list);
110134 ops_exit_list(ops, &net_exit_list);
110135@@ -821,7 +821,7 @@ int register_pernet_device(struct pernet_operations *ops)
110136 mutex_lock(&net_mutex);
110137 error = register_pernet_operations(&pernet_list, ops);
110138 if (!error && (first_device == &pernet_list))
110139- first_device = &ops->list;
110140+ first_device = (struct list_head *)&ops->list;
110141 mutex_unlock(&net_mutex);
110142 return error;
110143 }
110144diff --git a/net/core/netpoll.c b/net/core/netpoll.c
110145index c126a87..10ad89d 100644
110146--- a/net/core/netpoll.c
110147+++ b/net/core/netpoll.c
110148@@ -377,7 +377,7 @@ void netpoll_send_udp(struct netpoll *np, const char *msg, int len)
110149 struct udphdr *udph;
110150 struct iphdr *iph;
110151 struct ethhdr *eth;
110152- static atomic_t ip_ident;
110153+ static atomic_unchecked_t ip_ident;
110154 struct ipv6hdr *ip6h;
110155
110156 udp_len = len + sizeof(*udph);
110157@@ -448,7 +448,7 @@ void netpoll_send_udp(struct netpoll *np, const char *msg, int len)
110158 put_unaligned(0x45, (unsigned char *)iph);
110159 iph->tos = 0;
110160 put_unaligned(htons(ip_len), &(iph->tot_len));
110161- iph->id = htons(atomic_inc_return(&ip_ident));
110162+ iph->id = htons(atomic_inc_return_unchecked(&ip_ident));
110163 iph->frag_off = 0;
110164 iph->ttl = 64;
110165 iph->protocol = IPPROTO_UDP;
110166diff --git a/net/core/pktgen.c b/net/core/pktgen.c
110167index 508155b..fad080f 100644
110168--- a/net/core/pktgen.c
110169+++ b/net/core/pktgen.c
110170@@ -3755,7 +3755,7 @@ static int __net_init pg_net_init(struct net *net)
110171 pn->net = net;
110172 INIT_LIST_HEAD(&pn->pktgen_threads);
110173 pn->pktgen_exiting = false;
110174- pn->proc_dir = proc_mkdir(PG_PROC_DIR, pn->net->proc_net);
110175+ pn->proc_dir = proc_mkdir_restrict(PG_PROC_DIR, pn->net->proc_net);
110176 if (!pn->proc_dir) {
110177 pr_warn("cannot create /proc/net/%s\n", PG_PROC_DIR);
110178 return -ENODEV;
110179diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
110180index 7ebed55..378bf34 100644
110181--- a/net/core/rtnetlink.c
110182+++ b/net/core/rtnetlink.c
110183@@ -61,7 +61,7 @@ struct rtnl_link {
110184 rtnl_doit_func doit;
110185 rtnl_dumpit_func dumpit;
110186 rtnl_calcit_func calcit;
110187-};
110188+} __no_const;
110189
110190 static DEFINE_MUTEX(rtnl_mutex);
110191
110192@@ -307,10 +307,13 @@ int __rtnl_link_register(struct rtnl_link_ops *ops)
110193 * to use the ops for creating device. So do not
110194 * fill up dellink as well. That disables rtnl_dellink.
110195 */
110196- if (ops->setup && !ops->dellink)
110197- ops->dellink = unregister_netdevice_queue;
110198+ if (ops->setup && !ops->dellink) {
110199+ pax_open_kernel();
110200+ *(void **)&ops->dellink = unregister_netdevice_queue;
110201+ pax_close_kernel();
110202+ }
110203
110204- list_add_tail(&ops->list, &link_ops);
110205+ pax_list_add_tail((struct list_head *)&ops->list, &link_ops);
110206 return 0;
110207 }
110208 EXPORT_SYMBOL_GPL(__rtnl_link_register);
110209@@ -357,7 +360,7 @@ void __rtnl_link_unregister(struct rtnl_link_ops *ops)
110210 for_each_net(net) {
110211 __rtnl_kill_links(net, ops);
110212 }
110213- list_del(&ops->list);
110214+ pax_list_del((struct list_head *)&ops->list);
110215 }
110216 EXPORT_SYMBOL_GPL(__rtnl_link_unregister);
110217
110218@@ -1047,7 +1050,7 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev,
110219 (dev->ifalias &&
110220 nla_put_string(skb, IFLA_IFALIAS, dev->ifalias)) ||
110221 nla_put_u32(skb, IFLA_CARRIER_CHANGES,
110222- atomic_read(&dev->carrier_changes)))
110223+ atomic_read_unchecked(&dev->carrier_changes)))
110224 goto nla_put_failure;
110225
110226 if (1) {
110227diff --git a/net/core/scm.c b/net/core/scm.c
110228index 3b6899b..cf36238 100644
110229--- a/net/core/scm.c
110230+++ b/net/core/scm.c
110231@@ -209,7 +209,7 @@ EXPORT_SYMBOL(__scm_send);
110232 int put_cmsg(struct msghdr * msg, int level, int type, int len, void *data)
110233 {
110234 struct cmsghdr __user *cm
110235- = (__force struct cmsghdr __user *)msg->msg_control;
110236+ = (struct cmsghdr __force_user *)msg->msg_control;
110237 struct cmsghdr cmhdr;
110238 int cmlen = CMSG_LEN(len);
110239 int err;
110240@@ -232,7 +232,7 @@ int put_cmsg(struct msghdr * msg, int level, int type, int len, void *data)
110241 err = -EFAULT;
110242 if (copy_to_user(cm, &cmhdr, sizeof cmhdr))
110243 goto out;
110244- if (copy_to_user(CMSG_DATA(cm), data, cmlen - sizeof(struct cmsghdr)))
110245+ if (copy_to_user((void __force_user *)CMSG_DATA((void __force_kernel *)cm), data, cmlen - sizeof(struct cmsghdr)))
110246 goto out;
110247 cmlen = CMSG_SPACE(len);
110248 if (msg->msg_controllen < cmlen)
110249@@ -248,7 +248,7 @@ EXPORT_SYMBOL(put_cmsg);
110250 void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm)
110251 {
110252 struct cmsghdr __user *cm
110253- = (__force struct cmsghdr __user*)msg->msg_control;
110254+ = (struct cmsghdr __force_user *)msg->msg_control;
110255
110256 int fdmax = 0;
110257 int fdnum = scm->fp->count;
110258@@ -268,7 +268,7 @@ void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm)
110259 if (fdnum < fdmax)
110260 fdmax = fdnum;
110261
110262- for (i=0, cmfptr=(__force int __user *)CMSG_DATA(cm); i<fdmax;
110263+ for (i=0, cmfptr=(int __force_user *)CMSG_DATA((void __force_kernel *)cm); i<fdmax;
110264 i++, cmfptr++)
110265 {
110266 struct socket *sock;
110267diff --git a/net/core/skbuff.c b/net/core/skbuff.c
110268index e9f9a15..6eb024e 100644
110269--- a/net/core/skbuff.c
110270+++ b/net/core/skbuff.c
110271@@ -2139,7 +2139,7 @@ EXPORT_SYMBOL(__skb_checksum);
110272 __wsum skb_checksum(const struct sk_buff *skb, int offset,
110273 int len, __wsum csum)
110274 {
110275- const struct skb_checksum_ops ops = {
110276+ static const struct skb_checksum_ops ops = {
110277 .update = csum_partial_ext,
110278 .combine = csum_block_add_ext,
110279 };
110280@@ -3379,12 +3379,14 @@ void __init skb_init(void)
110281 skbuff_head_cache = kmem_cache_create("skbuff_head_cache",
110282 sizeof(struct sk_buff),
110283 0,
110284- SLAB_HWCACHE_ALIGN|SLAB_PANIC,
110285+ SLAB_HWCACHE_ALIGN|SLAB_PANIC|
110286+ SLAB_NO_SANITIZE,
110287 NULL);
110288 skbuff_fclone_cache = kmem_cache_create("skbuff_fclone_cache",
110289 sizeof(struct sk_buff_fclones),
110290 0,
110291- SLAB_HWCACHE_ALIGN|SLAB_PANIC,
110292+ SLAB_HWCACHE_ALIGN|SLAB_PANIC|
110293+ SLAB_NO_SANITIZE,
110294 NULL);
110295 }
110296
110297diff --git a/net/core/sock.c b/net/core/sock.c
110298index 71e3e5f..ab90920 100644
110299--- a/net/core/sock.c
110300+++ b/net/core/sock.c
110301@@ -443,7 +443,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
110302 struct sk_buff_head *list = &sk->sk_receive_queue;
110303
110304 if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf) {
110305- atomic_inc(&sk->sk_drops);
110306+ atomic_inc_unchecked(&sk->sk_drops);
110307 trace_sock_rcvqueue_full(sk, skb);
110308 return -ENOMEM;
110309 }
110310@@ -453,7 +453,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
110311 return err;
110312
110313 if (!sk_rmem_schedule(sk, skb, skb->truesize)) {
110314- atomic_inc(&sk->sk_drops);
110315+ atomic_inc_unchecked(&sk->sk_drops);
110316 return -ENOBUFS;
110317 }
110318
110319@@ -466,7 +466,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
110320 skb_dst_force(skb);
110321
110322 spin_lock_irqsave(&list->lock, flags);
110323- skb->dropcount = atomic_read(&sk->sk_drops);
110324+ skb->dropcount = atomic_read_unchecked(&sk->sk_drops);
110325 __skb_queue_tail(list, skb);
110326 spin_unlock_irqrestore(&list->lock, flags);
110327
110328@@ -486,7 +486,7 @@ int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested)
110329 skb->dev = NULL;
110330
110331 if (sk_rcvqueues_full(sk, sk->sk_rcvbuf)) {
110332- atomic_inc(&sk->sk_drops);
110333+ atomic_inc_unchecked(&sk->sk_drops);
110334 goto discard_and_relse;
110335 }
110336 if (nested)
110337@@ -504,7 +504,7 @@ int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested)
110338 mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);
110339 } else if (sk_add_backlog(sk, skb, sk->sk_rcvbuf)) {
110340 bh_unlock_sock(sk);
110341- atomic_inc(&sk->sk_drops);
110342+ atomic_inc_unchecked(&sk->sk_drops);
110343 goto discard_and_relse;
110344 }
110345
110346@@ -910,6 +910,7 @@ set_rcvbuf:
110347 }
110348 break;
110349
110350+#ifndef GRKERNSEC_BPF_HARDEN
110351 case SO_ATTACH_BPF:
110352 ret = -EINVAL;
110353 if (optlen == sizeof(u32)) {
110354@@ -922,7 +923,7 @@ set_rcvbuf:
110355 ret = sk_attach_bpf(ufd, sk);
110356 }
110357 break;
110358-
110359+#endif
110360 case SO_DETACH_FILTER:
110361 ret = sk_detach_filter(sk);
110362 break;
110363@@ -1026,12 +1027,12 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
110364 struct timeval tm;
110365 } v;
110366
110367- int lv = sizeof(int);
110368- int len;
110369+ unsigned int lv = sizeof(int);
110370+ unsigned int len;
110371
110372 if (get_user(len, optlen))
110373 return -EFAULT;
110374- if (len < 0)
110375+ if (len > INT_MAX)
110376 return -EINVAL;
110377
110378 memset(&v, 0, sizeof(v));
110379@@ -1169,11 +1170,11 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
110380
110381 case SO_PEERNAME:
110382 {
110383- char address[128];
110384+ char address[_K_SS_MAXSIZE];
110385
110386 if (sock->ops->getname(sock, (struct sockaddr *)address, &lv, 2))
110387 return -ENOTCONN;
110388- if (lv < len)
110389+ if (lv < len || sizeof address < len)
110390 return -EINVAL;
110391 if (copy_to_user(optval, address, len))
110392 return -EFAULT;
110393@@ -1258,7 +1259,7 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
110394
110395 if (len > lv)
110396 len = lv;
110397- if (copy_to_user(optval, &v, len))
110398+ if (len > sizeof(v) || copy_to_user(optval, &v, len))
110399 return -EFAULT;
110400 lenout:
110401 if (put_user(len, optlen))
110402@@ -2375,7 +2376,7 @@ void sock_init_data(struct socket *sock, struct sock *sk)
110403 */
110404 smp_wmb();
110405 atomic_set(&sk->sk_refcnt, 1);
110406- atomic_set(&sk->sk_drops, 0);
110407+ atomic_set_unchecked(&sk->sk_drops, 0);
110408 }
110409 EXPORT_SYMBOL(sock_init_data);
110410
110411@@ -2503,6 +2504,7 @@ void sock_enable_timestamp(struct sock *sk, int flag)
110412 int sock_recv_errqueue(struct sock *sk, struct msghdr *msg, int len,
110413 int level, int type)
110414 {
110415+ struct sock_extended_err ee;
110416 struct sock_exterr_skb *serr;
110417 struct sk_buff *skb;
110418 int copied, err;
110419@@ -2524,7 +2526,8 @@ int sock_recv_errqueue(struct sock *sk, struct msghdr *msg, int len,
110420 sock_recv_timestamp(msg, sk, skb);
110421
110422 serr = SKB_EXT_ERR(skb);
110423- put_cmsg(msg, level, type, sizeof(serr->ee), &serr->ee);
110424+ ee = serr->ee;
110425+ put_cmsg(msg, level, type, sizeof ee, &ee);
110426
110427 msg->msg_flags |= MSG_ERRQUEUE;
110428 err = copied;
110429diff --git a/net/core/sock_diag.c b/net/core/sock_diag.c
110430index ad704c7..ca48aff 100644
110431--- a/net/core/sock_diag.c
110432+++ b/net/core/sock_diag.c
110433@@ -9,26 +9,33 @@
110434 #include <linux/inet_diag.h>
110435 #include <linux/sock_diag.h>
110436
110437-static const struct sock_diag_handler *sock_diag_handlers[AF_MAX];
110438+static const struct sock_diag_handler *sock_diag_handlers[AF_MAX] __read_only;
110439 static int (*inet_rcv_compat)(struct sk_buff *skb, struct nlmsghdr *nlh);
110440 static DEFINE_MUTEX(sock_diag_table_mutex);
110441
110442 int sock_diag_check_cookie(void *sk, __u32 *cookie)
110443 {
110444+#ifndef CONFIG_GRKERNSEC_HIDESYM
110445 if ((cookie[0] != INET_DIAG_NOCOOKIE ||
110446 cookie[1] != INET_DIAG_NOCOOKIE) &&
110447 ((u32)(unsigned long)sk != cookie[0] ||
110448 (u32)((((unsigned long)sk) >> 31) >> 1) != cookie[1]))
110449 return -ESTALE;
110450 else
110451+#endif
110452 return 0;
110453 }
110454 EXPORT_SYMBOL_GPL(sock_diag_check_cookie);
110455
110456 void sock_diag_save_cookie(void *sk, __u32 *cookie)
110457 {
110458+#ifdef CONFIG_GRKERNSEC_HIDESYM
110459+ cookie[0] = 0;
110460+ cookie[1] = 0;
110461+#else
110462 cookie[0] = (u32)(unsigned long)sk;
110463 cookie[1] = (u32)(((unsigned long)sk >> 31) >> 1);
110464+#endif
110465 }
110466 EXPORT_SYMBOL_GPL(sock_diag_save_cookie);
110467
110468@@ -110,8 +117,11 @@ int sock_diag_register(const struct sock_diag_handler *hndl)
110469 mutex_lock(&sock_diag_table_mutex);
110470 if (sock_diag_handlers[hndl->family])
110471 err = -EBUSY;
110472- else
110473+ else {
110474+ pax_open_kernel();
110475 sock_diag_handlers[hndl->family] = hndl;
110476+ pax_close_kernel();
110477+ }
110478 mutex_unlock(&sock_diag_table_mutex);
110479
110480 return err;
110481@@ -127,7 +137,9 @@ void sock_diag_unregister(const struct sock_diag_handler *hnld)
110482
110483 mutex_lock(&sock_diag_table_mutex);
110484 BUG_ON(sock_diag_handlers[family] != hnld);
110485+ pax_open_kernel();
110486 sock_diag_handlers[family] = NULL;
110487+ pax_close_kernel();
110488 mutex_unlock(&sock_diag_table_mutex);
110489 }
110490 EXPORT_SYMBOL_GPL(sock_diag_unregister);
110491diff --git a/net/core/sysctl_net_core.c b/net/core/sysctl_net_core.c
110492index 8ce351f..2c388f7 100644
110493--- a/net/core/sysctl_net_core.c
110494+++ b/net/core/sysctl_net_core.c
110495@@ -36,7 +36,7 @@ static int rps_sock_flow_sysctl(struct ctl_table *table, int write,
110496 {
110497 unsigned int orig_size, size;
110498 int ret, i;
110499- struct ctl_table tmp = {
110500+ ctl_table_no_const tmp = {
110501 .data = &size,
110502 .maxlen = sizeof(size),
110503 .mode = table->mode
110504@@ -204,7 +204,7 @@ static int set_default_qdisc(struct ctl_table *table, int write,
110505 void __user *buffer, size_t *lenp, loff_t *ppos)
110506 {
110507 char id[IFNAMSIZ];
110508- struct ctl_table tbl = {
110509+ ctl_table_no_const tbl = {
110510 .data = id,
110511 .maxlen = IFNAMSIZ,
110512 };
110513@@ -222,7 +222,7 @@ static int set_default_qdisc(struct ctl_table *table, int write,
110514 static int proc_do_rss_key(struct ctl_table *table, int write,
110515 void __user *buffer, size_t *lenp, loff_t *ppos)
110516 {
110517- struct ctl_table fake_table;
110518+ ctl_table_no_const fake_table;
110519 char buf[NETDEV_RSS_KEY_LEN * 3];
110520
110521 snprintf(buf, sizeof(buf), "%*phC", NETDEV_RSS_KEY_LEN, netdev_rss_key);
110522@@ -286,7 +286,7 @@ static struct ctl_table net_core_table[] = {
110523 .mode = 0444,
110524 .proc_handler = proc_do_rss_key,
110525 },
110526-#ifdef CONFIG_BPF_JIT
110527+#if defined(CONFIG_BPF_JIT) && !defined(CONFIG_GRKERNSEC_BPF_HARDEN)
110528 {
110529 .procname = "bpf_jit_enable",
110530 .data = &bpf_jit_enable,
110531@@ -411,13 +411,12 @@ static struct ctl_table netns_core_table[] = {
110532
110533 static __net_init int sysctl_core_net_init(struct net *net)
110534 {
110535- struct ctl_table *tbl;
110536+ ctl_table_no_const *tbl = NULL;
110537
110538 net->core.sysctl_somaxconn = SOMAXCONN;
110539
110540- tbl = netns_core_table;
110541 if (!net_eq(net, &init_net)) {
110542- tbl = kmemdup(tbl, sizeof(netns_core_table), GFP_KERNEL);
110543+ tbl = kmemdup(netns_core_table, sizeof(netns_core_table), GFP_KERNEL);
110544 if (tbl == NULL)
110545 goto err_dup;
110546
110547@@ -427,17 +426,16 @@ static __net_init int sysctl_core_net_init(struct net *net)
110548 if (net->user_ns != &init_user_ns) {
110549 tbl[0].procname = NULL;
110550 }
110551- }
110552-
110553- net->core.sysctl_hdr = register_net_sysctl(net, "net/core", tbl);
110554+ net->core.sysctl_hdr = register_net_sysctl(net, "net/core", tbl);
110555+ } else
110556+ net->core.sysctl_hdr = register_net_sysctl(net, "net/core", netns_core_table);
110557 if (net->core.sysctl_hdr == NULL)
110558 goto err_reg;
110559
110560 return 0;
110561
110562 err_reg:
110563- if (tbl != netns_core_table)
110564- kfree(tbl);
110565+ kfree(tbl);
110566 err_dup:
110567 return -ENOMEM;
110568 }
110569@@ -452,7 +450,7 @@ static __net_exit void sysctl_core_net_exit(struct net *net)
110570 kfree(tbl);
110571 }
110572
110573-static __net_initdata struct pernet_operations sysctl_core_ops = {
110574+static __net_initconst struct pernet_operations sysctl_core_ops = {
110575 .init = sysctl_core_net_init,
110576 .exit = sysctl_core_net_exit,
110577 };
110578diff --git a/net/decnet/af_decnet.c b/net/decnet/af_decnet.c
110579index 8102286..a0c2755 100644
110580--- a/net/decnet/af_decnet.c
110581+++ b/net/decnet/af_decnet.c
110582@@ -466,6 +466,7 @@ static struct proto dn_proto = {
110583 .sysctl_rmem = sysctl_decnet_rmem,
110584 .max_header = DN_MAX_NSP_DATA_HEADER + 64,
110585 .obj_size = sizeof(struct dn_sock),
110586+ .slab_flags = SLAB_USERCOPY,
110587 };
110588
110589 static struct sock *dn_alloc_sock(struct net *net, struct socket *sock, gfp_t gfp)
110590diff --git a/net/decnet/dn_dev.c b/net/decnet/dn_dev.c
110591index b2c26b0..41f803e 100644
110592--- a/net/decnet/dn_dev.c
110593+++ b/net/decnet/dn_dev.c
110594@@ -201,7 +201,7 @@ static struct dn_dev_sysctl_table {
110595 .extra1 = &min_t3,
110596 .extra2 = &max_t3
110597 },
110598- {0}
110599+ { }
110600 },
110601 };
110602
110603diff --git a/net/decnet/sysctl_net_decnet.c b/net/decnet/sysctl_net_decnet.c
110604index 5325b54..a0d4d69 100644
110605--- a/net/decnet/sysctl_net_decnet.c
110606+++ b/net/decnet/sysctl_net_decnet.c
110607@@ -174,7 +174,7 @@ static int dn_node_address_handler(struct ctl_table *table, int write,
110608
110609 if (len > *lenp) len = *lenp;
110610
110611- if (copy_to_user(buffer, addr, len))
110612+ if (len > sizeof addr || copy_to_user(buffer, addr, len))
110613 return -EFAULT;
110614
110615 *lenp = len;
110616@@ -237,7 +237,7 @@ static int dn_def_dev_handler(struct ctl_table *table, int write,
110617
110618 if (len > *lenp) len = *lenp;
110619
110620- if (copy_to_user(buffer, devname, len))
110621+ if (len > sizeof devname || copy_to_user(buffer, devname, len))
110622 return -EFAULT;
110623
110624 *lenp = len;
110625diff --git a/net/hsr/hsr_netlink.c b/net/hsr/hsr_netlink.c
110626index a2c7e4c..3dc9f67 100644
110627--- a/net/hsr/hsr_netlink.c
110628+++ b/net/hsr/hsr_netlink.c
110629@@ -102,7 +102,7 @@ nla_put_failure:
110630 return -EMSGSIZE;
110631 }
110632
110633-static struct rtnl_link_ops hsr_link_ops __read_mostly = {
110634+static struct rtnl_link_ops hsr_link_ops = {
110635 .kind = "hsr",
110636 .maxtype = IFLA_HSR_MAX,
110637 .policy = hsr_policy,
110638diff --git a/net/ieee802154/6lowpan/core.c b/net/ieee802154/6lowpan/core.c
110639index 055fbb7..c0dbe60 100644
110640--- a/net/ieee802154/6lowpan/core.c
110641+++ b/net/ieee802154/6lowpan/core.c
110642@@ -217,7 +217,7 @@ static void lowpan_dellink(struct net_device *dev, struct list_head *head)
110643 dev_put(real_dev);
110644 }
110645
110646-static struct rtnl_link_ops lowpan_link_ops __read_mostly = {
110647+static struct rtnl_link_ops lowpan_link_ops = {
110648 .kind = "lowpan",
110649 .priv_size = sizeof(struct lowpan_dev_info),
110650 .setup = lowpan_setup,
110651diff --git a/net/ieee802154/6lowpan/reassembly.c b/net/ieee802154/6lowpan/reassembly.c
110652index f46e4d1..30231f1 100644
110653--- a/net/ieee802154/6lowpan/reassembly.c
110654+++ b/net/ieee802154/6lowpan/reassembly.c
110655@@ -435,14 +435,13 @@ static struct ctl_table lowpan_frags_ctl_table[] = {
110656
110657 static int __net_init lowpan_frags_ns_sysctl_register(struct net *net)
110658 {
110659- struct ctl_table *table;
110660+ ctl_table_no_const *table = NULL;
110661 struct ctl_table_header *hdr;
110662 struct netns_ieee802154_lowpan *ieee802154_lowpan =
110663 net_ieee802154_lowpan(net);
110664
110665- table = lowpan_frags_ns_ctl_table;
110666 if (!net_eq(net, &init_net)) {
110667- table = kmemdup(table, sizeof(lowpan_frags_ns_ctl_table),
110668+ table = kmemdup(lowpan_frags_ns_ctl_table, sizeof(lowpan_frags_ns_ctl_table),
110669 GFP_KERNEL);
110670 if (table == NULL)
110671 goto err_alloc;
110672@@ -457,9 +456,9 @@ static int __net_init lowpan_frags_ns_sysctl_register(struct net *net)
110673 /* Don't export sysctls to unprivileged users */
110674 if (net->user_ns != &init_user_ns)
110675 table[0].procname = NULL;
110676- }
110677-
110678- hdr = register_net_sysctl(net, "net/ieee802154/6lowpan", table);
110679+ hdr = register_net_sysctl(net, "net/ieee802154/6lowpan", table);
110680+ } else
110681+ hdr = register_net_sysctl(net, "net/ieee802154/6lowpan", lowpan_frags_ns_ctl_table);
110682 if (hdr == NULL)
110683 goto err_reg;
110684
110685@@ -467,8 +466,7 @@ static int __net_init lowpan_frags_ns_sysctl_register(struct net *net)
110686 return 0;
110687
110688 err_reg:
110689- if (!net_eq(net, &init_net))
110690- kfree(table);
110691+ kfree(table);
110692 err_alloc:
110693 return -ENOMEM;
110694 }
110695diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
110696index d2e49ba..f78e8aa 100644
110697--- a/net/ipv4/af_inet.c
110698+++ b/net/ipv4/af_inet.c
110699@@ -1390,7 +1390,7 @@ int inet_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len)
110700 return ip_recv_error(sk, msg, len, addr_len);
110701 #if IS_ENABLED(CONFIG_IPV6)
110702 if (sk->sk_family == AF_INET6)
110703- return pingv6_ops.ipv6_recv_error(sk, msg, len, addr_len);
110704+ return pingv6_ops->ipv6_recv_error(sk, msg, len, addr_len);
110705 #endif
110706 return -EINVAL;
110707 }
110708diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
110709index 3a8985c..9d2a870 100644
110710--- a/net/ipv4/devinet.c
110711+++ b/net/ipv4/devinet.c
110712@@ -69,7 +69,8 @@
110713
110714 static struct ipv4_devconf ipv4_devconf = {
110715 .data = {
110716- [IPV4_DEVCONF_ACCEPT_REDIRECTS - 1] = 1,
110717+ [IPV4_DEVCONF_ACCEPT_REDIRECTS - 1] = 0,
110718+ [IPV4_DEVCONF_RP_FILTER - 1] = 1,
110719 [IPV4_DEVCONF_SEND_REDIRECTS - 1] = 1,
110720 [IPV4_DEVCONF_SECURE_REDIRECTS - 1] = 1,
110721 [IPV4_DEVCONF_SHARED_MEDIA - 1] = 1,
110722@@ -80,7 +81,8 @@ static struct ipv4_devconf ipv4_devconf = {
110723
110724 static struct ipv4_devconf ipv4_devconf_dflt = {
110725 .data = {
110726- [IPV4_DEVCONF_ACCEPT_REDIRECTS - 1] = 1,
110727+ [IPV4_DEVCONF_ACCEPT_REDIRECTS - 1] = 0,
110728+ [IPV4_DEVCONF_RP_FILTER - 1] = 1,
110729 [IPV4_DEVCONF_SEND_REDIRECTS - 1] = 1,
110730 [IPV4_DEVCONF_SECURE_REDIRECTS - 1] = 1,
110731 [IPV4_DEVCONF_SHARED_MEDIA - 1] = 1,
110732@@ -1549,7 +1551,7 @@ static int inet_dump_ifaddr(struct sk_buff *skb, struct netlink_callback *cb)
110733 idx = 0;
110734 head = &net->dev_index_head[h];
110735 rcu_read_lock();
110736- cb->seq = atomic_read(&net->ipv4.dev_addr_genid) ^
110737+ cb->seq = atomic_read_unchecked(&net->ipv4.dev_addr_genid) ^
110738 net->dev_base_seq;
110739 hlist_for_each_entry_rcu(dev, head, index_hlist) {
110740 if (idx < s_idx)
110741@@ -1868,7 +1870,7 @@ static int inet_netconf_dump_devconf(struct sk_buff *skb,
110742 idx = 0;
110743 head = &net->dev_index_head[h];
110744 rcu_read_lock();
110745- cb->seq = atomic_read(&net->ipv4.dev_addr_genid) ^
110746+ cb->seq = atomic_read_unchecked(&net->ipv4.dev_addr_genid) ^
110747 net->dev_base_seq;
110748 hlist_for_each_entry_rcu(dev, head, index_hlist) {
110749 if (idx < s_idx)
110750@@ -2103,7 +2105,7 @@ static int ipv4_doint_and_flush(struct ctl_table *ctl, int write,
110751 #define DEVINET_SYSCTL_FLUSHING_ENTRY(attr, name) \
110752 DEVINET_SYSCTL_COMPLEX_ENTRY(attr, name, ipv4_doint_and_flush)
110753
110754-static struct devinet_sysctl_table {
110755+static const struct devinet_sysctl_table {
110756 struct ctl_table_header *sysctl_header;
110757 struct ctl_table devinet_vars[__IPV4_DEVCONF_MAX];
110758 } devinet_sysctl = {
110759@@ -2235,7 +2237,7 @@ static __net_init int devinet_init_net(struct net *net)
110760 int err;
110761 struct ipv4_devconf *all, *dflt;
110762 #ifdef CONFIG_SYSCTL
110763- struct ctl_table *tbl = ctl_forward_entry;
110764+ ctl_table_no_const *tbl = NULL;
110765 struct ctl_table_header *forw_hdr;
110766 #endif
110767
110768@@ -2253,7 +2255,7 @@ static __net_init int devinet_init_net(struct net *net)
110769 goto err_alloc_dflt;
110770
110771 #ifdef CONFIG_SYSCTL
110772- tbl = kmemdup(tbl, sizeof(ctl_forward_entry), GFP_KERNEL);
110773+ tbl = kmemdup(ctl_forward_entry, sizeof(ctl_forward_entry), GFP_KERNEL);
110774 if (tbl == NULL)
110775 goto err_alloc_ctl;
110776
110777@@ -2273,7 +2275,10 @@ static __net_init int devinet_init_net(struct net *net)
110778 goto err_reg_dflt;
110779
110780 err = -ENOMEM;
110781- forw_hdr = register_net_sysctl(net, "net/ipv4", tbl);
110782+ if (!net_eq(net, &init_net))
110783+ forw_hdr = register_net_sysctl(net, "net/ipv4", tbl);
110784+ else
110785+ forw_hdr = register_net_sysctl(net, "net/ipv4", ctl_forward_entry);
110786 if (forw_hdr == NULL)
110787 goto err_reg_ctl;
110788 net->ipv4.forw_hdr = forw_hdr;
110789@@ -2289,8 +2294,7 @@ err_reg_ctl:
110790 err_reg_dflt:
110791 __devinet_sysctl_unregister(all);
110792 err_reg_all:
110793- if (tbl != ctl_forward_entry)
110794- kfree(tbl);
110795+ kfree(tbl);
110796 err_alloc_ctl:
110797 #endif
110798 if (dflt != &ipv4_devconf_dflt)
110799diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
110800index 23b9b3e..60cf0c4 100644
110801--- a/net/ipv4/fib_frontend.c
110802+++ b/net/ipv4/fib_frontend.c
110803@@ -1020,12 +1020,12 @@ static int fib_inetaddr_event(struct notifier_block *this, unsigned long event,
110804 #ifdef CONFIG_IP_ROUTE_MULTIPATH
110805 fib_sync_up(dev);
110806 #endif
110807- atomic_inc(&net->ipv4.dev_addr_genid);
110808+ atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
110809 rt_cache_flush(dev_net(dev));
110810 break;
110811 case NETDEV_DOWN:
110812 fib_del_ifaddr(ifa, NULL);
110813- atomic_inc(&net->ipv4.dev_addr_genid);
110814+ atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
110815 if (ifa->ifa_dev->ifa_list == NULL) {
110816 /* Last address was deleted from this interface.
110817 * Disable IP.
110818@@ -1063,7 +1063,7 @@ static int fib_netdev_event(struct notifier_block *this, unsigned long event, vo
110819 #ifdef CONFIG_IP_ROUTE_MULTIPATH
110820 fib_sync_up(dev);
110821 #endif
110822- atomic_inc(&net->ipv4.dev_addr_genid);
110823+ atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
110824 rt_cache_flush(net);
110825 break;
110826 case NETDEV_DOWN:
110827diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
110828index 1e2090e..351a724 100644
110829--- a/net/ipv4/fib_semantics.c
110830+++ b/net/ipv4/fib_semantics.c
110831@@ -753,7 +753,7 @@ __be32 fib_info_update_nh_saddr(struct net *net, struct fib_nh *nh)
110832 nh->nh_saddr = inet_select_addr(nh->nh_dev,
110833 nh->nh_gw,
110834 nh->nh_parent->fib_scope);
110835- nh->nh_saddr_genid = atomic_read(&net->ipv4.dev_addr_genid);
110836+ nh->nh_saddr_genid = atomic_read_unchecked(&net->ipv4.dev_addr_genid);
110837
110838 return nh->nh_saddr;
110839 }
110840diff --git a/net/ipv4/fou.c b/net/ipv4/fou.c
110841index ff069f6..335e752 100644
110842--- a/net/ipv4/fou.c
110843+++ b/net/ipv4/fou.c
110844@@ -771,12 +771,12 @@ EXPORT_SYMBOL(gue_build_header);
110845
110846 #ifdef CONFIG_NET_FOU_IP_TUNNELS
110847
110848-static const struct ip_tunnel_encap_ops __read_mostly fou_iptun_ops = {
110849+static const struct ip_tunnel_encap_ops fou_iptun_ops = {
110850 .encap_hlen = fou_encap_hlen,
110851 .build_header = fou_build_header,
110852 };
110853
110854-static const struct ip_tunnel_encap_ops __read_mostly gue_iptun_ops = {
110855+static const struct ip_tunnel_encap_ops gue_iptun_ops = {
110856 .encap_hlen = gue_encap_hlen,
110857 .build_header = gue_build_header,
110858 };
110859diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
110860index 9111a4e..3576905 100644
110861--- a/net/ipv4/inet_hashtables.c
110862+++ b/net/ipv4/inet_hashtables.c
110863@@ -18,6 +18,7 @@
110864 #include <linux/sched.h>
110865 #include <linux/slab.h>
110866 #include <linux/wait.h>
110867+#include <linux/security.h>
110868
110869 #include <net/inet_connection_sock.h>
110870 #include <net/inet_hashtables.h>
110871@@ -49,6 +50,8 @@ static unsigned int inet_sk_ehashfn(const struct sock *sk)
110872 return inet_ehashfn(net, laddr, lport, faddr, fport);
110873 }
110874
110875+extern void gr_update_task_in_ip_table(const struct inet_sock *inet);
110876+
110877 /*
110878 * Allocate and initialize a new local port bind bucket.
110879 * The bindhash mutex for snum's hash chain must be held here.
110880@@ -554,6 +557,8 @@ ok:
110881 twrefcnt += inet_twsk_bind_unhash(tw, hinfo);
110882 spin_unlock(&head->lock);
110883
110884+ gr_update_task_in_ip_table(inet_sk(sk));
110885+
110886 if (tw) {
110887 inet_twsk_deschedule(tw, death_row);
110888 while (twrefcnt) {
110889diff --git a/net/ipv4/inetpeer.c b/net/ipv4/inetpeer.c
110890index 241afd7..31b95d5 100644
110891--- a/net/ipv4/inetpeer.c
110892+++ b/net/ipv4/inetpeer.c
110893@@ -461,7 +461,7 @@ relookup:
110894 if (p) {
110895 p->daddr = *daddr;
110896 atomic_set(&p->refcnt, 1);
110897- atomic_set(&p->rid, 0);
110898+ atomic_set_unchecked(&p->rid, 0);
110899 p->metrics[RTAX_LOCK-1] = INETPEER_METRICS_NEW;
110900 p->rate_tokens = 0;
110901 /* 60*HZ is arbitrary, but chosen enough high so that the first
110902diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
110903index 145a50c..5dd8cc5 100644
110904--- a/net/ipv4/ip_fragment.c
110905+++ b/net/ipv4/ip_fragment.c
110906@@ -268,7 +268,7 @@ static int ip_frag_too_far(struct ipq *qp)
110907 return 0;
110908
110909 start = qp->rid;
110910- end = atomic_inc_return(&peer->rid);
110911+ end = atomic_inc_return_unchecked(&peer->rid);
110912 qp->rid = end;
110913
110914 rc = qp->q.fragments && (end - start) > max;
110915@@ -748,12 +748,11 @@ static struct ctl_table ip4_frags_ctl_table[] = {
110916
110917 static int __net_init ip4_frags_ns_ctl_register(struct net *net)
110918 {
110919- struct ctl_table *table;
110920+ ctl_table_no_const *table = NULL;
110921 struct ctl_table_header *hdr;
110922
110923- table = ip4_frags_ns_ctl_table;
110924 if (!net_eq(net, &init_net)) {
110925- table = kmemdup(table, sizeof(ip4_frags_ns_ctl_table), GFP_KERNEL);
110926+ table = kmemdup(ip4_frags_ns_ctl_table, sizeof(ip4_frags_ns_ctl_table), GFP_KERNEL);
110927 if (table == NULL)
110928 goto err_alloc;
110929
110930@@ -767,9 +766,10 @@ static int __net_init ip4_frags_ns_ctl_register(struct net *net)
110931 /* Don't export sysctls to unprivileged users */
110932 if (net->user_ns != &init_user_ns)
110933 table[0].procname = NULL;
110934- }
110935+ hdr = register_net_sysctl(net, "net/ipv4", table);
110936+ } else
110937+ hdr = register_net_sysctl(net, "net/ipv4", ip4_frags_ns_ctl_table);
110938
110939- hdr = register_net_sysctl(net, "net/ipv4", table);
110940 if (hdr == NULL)
110941 goto err_reg;
110942
110943@@ -777,8 +777,7 @@ static int __net_init ip4_frags_ns_ctl_register(struct net *net)
110944 return 0;
110945
110946 err_reg:
110947- if (!net_eq(net, &init_net))
110948- kfree(table);
110949+ kfree(table);
110950 err_alloc:
110951 return -ENOMEM;
110952 }
110953diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
110954index 6207275f..00323a2 100644
110955--- a/net/ipv4/ip_gre.c
110956+++ b/net/ipv4/ip_gre.c
110957@@ -115,7 +115,7 @@ static bool log_ecn_error = true;
110958 module_param(log_ecn_error, bool, 0644);
110959 MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN");
110960
110961-static struct rtnl_link_ops ipgre_link_ops __read_mostly;
110962+static struct rtnl_link_ops ipgre_link_ops;
110963 static int ipgre_tunnel_init(struct net_device *dev);
110964
110965 static int ipgre_net_id __read_mostly;
110966@@ -817,7 +817,7 @@ static const struct nla_policy ipgre_policy[IFLA_GRE_MAX + 1] = {
110967 [IFLA_GRE_ENCAP_DPORT] = { .type = NLA_U16 },
110968 };
110969
110970-static struct rtnl_link_ops ipgre_link_ops __read_mostly = {
110971+static struct rtnl_link_ops ipgre_link_ops = {
110972 .kind = "gre",
110973 .maxtype = IFLA_GRE_MAX,
110974 .policy = ipgre_policy,
110975@@ -832,7 +832,7 @@ static struct rtnl_link_ops ipgre_link_ops __read_mostly = {
110976 .get_link_net = ip_tunnel_get_link_net,
110977 };
110978
110979-static struct rtnl_link_ops ipgre_tap_ops __read_mostly = {
110980+static struct rtnl_link_ops ipgre_tap_ops = {
110981 .kind = "gretap",
110982 .maxtype = IFLA_GRE_MAX,
110983 .policy = ipgre_policy,
110984diff --git a/net/ipv4/ip_input.c b/net/ipv4/ip_input.c
110985index 3d4da2c..40f9c29 100644
110986--- a/net/ipv4/ip_input.c
110987+++ b/net/ipv4/ip_input.c
110988@@ -147,6 +147,10 @@
110989 #include <linux/mroute.h>
110990 #include <linux/netlink.h>
110991
110992+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
110993+extern int grsec_enable_blackhole;
110994+#endif
110995+
110996 /*
110997 * Process Router Attention IP option (RFC 2113)
110998 */
110999@@ -223,6 +227,9 @@ static int ip_local_deliver_finish(struct sk_buff *skb)
111000 if (!raw) {
111001 if (xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) {
111002 IP_INC_STATS_BH(net, IPSTATS_MIB_INUNKNOWNPROTOS);
111003+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
111004+ if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
111005+#endif
111006 icmp_send(skb, ICMP_DEST_UNREACH,
111007 ICMP_PROT_UNREACH, 0);
111008 }
111009diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
111010index 5cd9927..8610b9f 100644
111011--- a/net/ipv4/ip_sockglue.c
111012+++ b/net/ipv4/ip_sockglue.c
111013@@ -1254,7 +1254,8 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname,
111014 len = min_t(unsigned int, len, opt->optlen);
111015 if (put_user(len, optlen))
111016 return -EFAULT;
111017- if (copy_to_user(optval, opt->__data, len))
111018+ if ((len > (sizeof(optbuf) - sizeof(struct ip_options))) ||
111019+ copy_to_user(optval, opt->__data, len))
111020 return -EFAULT;
111021 return 0;
111022 }
111023@@ -1388,7 +1389,7 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname,
111024 if (sk->sk_type != SOCK_STREAM)
111025 return -ENOPROTOOPT;
111026
111027- msg.msg_control = (__force void *) optval;
111028+ msg.msg_control = (__force_kernel void *) optval;
111029 msg.msg_controllen = len;
111030 msg.msg_flags = flags;
111031
111032diff --git a/net/ipv4/ip_vti.c b/net/ipv4/ip_vti.c
111033index 94efe14..1453fcc 100644
111034--- a/net/ipv4/ip_vti.c
111035+++ b/net/ipv4/ip_vti.c
111036@@ -45,7 +45,7 @@
111037 #include <net/net_namespace.h>
111038 #include <net/netns/generic.h>
111039
111040-static struct rtnl_link_ops vti_link_ops __read_mostly;
111041+static struct rtnl_link_ops vti_link_ops;
111042
111043 static int vti_net_id __read_mostly;
111044 static int vti_tunnel_init(struct net_device *dev);
111045@@ -519,7 +519,7 @@ static const struct nla_policy vti_policy[IFLA_VTI_MAX + 1] = {
111046 [IFLA_VTI_REMOTE] = { .len = FIELD_SIZEOF(struct iphdr, daddr) },
111047 };
111048
111049-static struct rtnl_link_ops vti_link_ops __read_mostly = {
111050+static struct rtnl_link_ops vti_link_ops = {
111051 .kind = "vti",
111052 .maxtype = IFLA_VTI_MAX,
111053 .policy = vti_policy,
111054diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c
111055index b26376e..fc3d733 100644
111056--- a/net/ipv4/ipconfig.c
111057+++ b/net/ipv4/ipconfig.c
111058@@ -333,7 +333,7 @@ static int __init ic_devinet_ioctl(unsigned int cmd, struct ifreq *arg)
111059
111060 mm_segment_t oldfs = get_fs();
111061 set_fs(get_ds());
111062- res = devinet_ioctl(&init_net, cmd, (struct ifreq __user *) arg);
111063+ res = devinet_ioctl(&init_net, cmd, (struct ifreq __force_user *) arg);
111064 set_fs(oldfs);
111065 return res;
111066 }
111067@@ -344,7 +344,7 @@ static int __init ic_dev_ioctl(unsigned int cmd, struct ifreq *arg)
111068
111069 mm_segment_t oldfs = get_fs();
111070 set_fs(get_ds());
111071- res = dev_ioctl(&init_net, cmd, (struct ifreq __user *) arg);
111072+ res = dev_ioctl(&init_net, cmd, (struct ifreq __force_user *) arg);
111073 set_fs(oldfs);
111074 return res;
111075 }
111076@@ -355,7 +355,7 @@ static int __init ic_route_ioctl(unsigned int cmd, struct rtentry *arg)
111077
111078 mm_segment_t oldfs = get_fs();
111079 set_fs(get_ds());
111080- res = ip_rt_ioctl(&init_net, cmd, (void __user *) arg);
111081+ res = ip_rt_ioctl(&init_net, cmd, (void __force_user *) arg);
111082 set_fs(oldfs);
111083 return res;
111084 }
111085diff --git a/net/ipv4/ipip.c b/net/ipv4/ipip.c
111086index 915d215..48d1db7 100644
111087--- a/net/ipv4/ipip.c
111088+++ b/net/ipv4/ipip.c
111089@@ -124,7 +124,7 @@ MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN");
111090 static int ipip_net_id __read_mostly;
111091
111092 static int ipip_tunnel_init(struct net_device *dev);
111093-static struct rtnl_link_ops ipip_link_ops __read_mostly;
111094+static struct rtnl_link_ops ipip_link_ops;
111095
111096 static int ipip_err(struct sk_buff *skb, u32 info)
111097 {
111098@@ -487,7 +487,7 @@ static const struct nla_policy ipip_policy[IFLA_IPTUN_MAX + 1] = {
111099 [IFLA_IPTUN_ENCAP_DPORT] = { .type = NLA_U16 },
111100 };
111101
111102-static struct rtnl_link_ops ipip_link_ops __read_mostly = {
111103+static struct rtnl_link_ops ipip_link_ops = {
111104 .kind = "ipip",
111105 .maxtype = IFLA_IPTUN_MAX,
111106 .policy = ipip_policy,
111107diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c
111108index f95b6f9..2ee2097 100644
111109--- a/net/ipv4/netfilter/arp_tables.c
111110+++ b/net/ipv4/netfilter/arp_tables.c
111111@@ -885,14 +885,14 @@ static int compat_table_info(const struct xt_table_info *info,
111112 #endif
111113
111114 static int get_info(struct net *net, void __user *user,
111115- const int *len, int compat)
111116+ int len, int compat)
111117 {
111118 char name[XT_TABLE_MAXNAMELEN];
111119 struct xt_table *t;
111120 int ret;
111121
111122- if (*len != sizeof(struct arpt_getinfo)) {
111123- duprintf("length %u != %Zu\n", *len,
111124+ if (len != sizeof(struct arpt_getinfo)) {
111125+ duprintf("length %u != %Zu\n", len,
111126 sizeof(struct arpt_getinfo));
111127 return -EINVAL;
111128 }
111129@@ -929,7 +929,7 @@ static int get_info(struct net *net, void __user *user,
111130 info.size = private->size;
111131 strcpy(info.name, name);
111132
111133- if (copy_to_user(user, &info, *len) != 0)
111134+ if (copy_to_user(user, &info, len) != 0)
111135 ret = -EFAULT;
111136 else
111137 ret = 0;
111138@@ -1690,7 +1690,7 @@ static int compat_do_arpt_get_ctl(struct sock *sk, int cmd, void __user *user,
111139
111140 switch (cmd) {
111141 case ARPT_SO_GET_INFO:
111142- ret = get_info(sock_net(sk), user, len, 1);
111143+ ret = get_info(sock_net(sk), user, *len, 1);
111144 break;
111145 case ARPT_SO_GET_ENTRIES:
111146 ret = compat_get_entries(sock_net(sk), user, len);
111147@@ -1735,7 +1735,7 @@ static int do_arpt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len
111148
111149 switch (cmd) {
111150 case ARPT_SO_GET_INFO:
111151- ret = get_info(sock_net(sk), user, len, 0);
111152+ ret = get_info(sock_net(sk), user, *len, 0);
111153 break;
111154
111155 case ARPT_SO_GET_ENTRIES:
111156diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c
111157index cf5e82f..75a20f5 100644
111158--- a/net/ipv4/netfilter/ip_tables.c
111159+++ b/net/ipv4/netfilter/ip_tables.c
111160@@ -1073,14 +1073,14 @@ static int compat_table_info(const struct xt_table_info *info,
111161 #endif
111162
111163 static int get_info(struct net *net, void __user *user,
111164- const int *len, int compat)
111165+ int len, int compat)
111166 {
111167 char name[XT_TABLE_MAXNAMELEN];
111168 struct xt_table *t;
111169 int ret;
111170
111171- if (*len != sizeof(struct ipt_getinfo)) {
111172- duprintf("length %u != %zu\n", *len,
111173+ if (len != sizeof(struct ipt_getinfo)) {
111174+ duprintf("length %u != %zu\n", len,
111175 sizeof(struct ipt_getinfo));
111176 return -EINVAL;
111177 }
111178@@ -1117,7 +1117,7 @@ static int get_info(struct net *net, void __user *user,
111179 info.size = private->size;
111180 strcpy(info.name, name);
111181
111182- if (copy_to_user(user, &info, *len) != 0)
111183+ if (copy_to_user(user, &info, len) != 0)
111184 ret = -EFAULT;
111185 else
111186 ret = 0;
111187@@ -1973,7 +1973,7 @@ compat_do_ipt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
111188
111189 switch (cmd) {
111190 case IPT_SO_GET_INFO:
111191- ret = get_info(sock_net(sk), user, len, 1);
111192+ ret = get_info(sock_net(sk), user, *len, 1);
111193 break;
111194 case IPT_SO_GET_ENTRIES:
111195 ret = compat_get_entries(sock_net(sk), user, len);
111196@@ -2020,7 +2020,7 @@ do_ipt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
111197
111198 switch (cmd) {
111199 case IPT_SO_GET_INFO:
111200- ret = get_info(sock_net(sk), user, len, 0);
111201+ ret = get_info(sock_net(sk), user, *len, 0);
111202 break;
111203
111204 case IPT_SO_GET_ENTRIES:
111205diff --git a/net/ipv4/netfilter/ipt_CLUSTERIP.c b/net/ipv4/netfilter/ipt_CLUSTERIP.c
111206index e90f83a..3e6acca 100644
111207--- a/net/ipv4/netfilter/ipt_CLUSTERIP.c
111208+++ b/net/ipv4/netfilter/ipt_CLUSTERIP.c
111209@@ -720,7 +720,7 @@ static int clusterip_net_init(struct net *net)
111210 spin_lock_init(&cn->lock);
111211
111212 #ifdef CONFIG_PROC_FS
111213- cn->procdir = proc_mkdir("ipt_CLUSTERIP", net->proc_net);
111214+ cn->procdir = proc_mkdir_restrict("ipt_CLUSTERIP", net->proc_net);
111215 if (!cn->procdir) {
111216 pr_err("Unable to proc dir entry\n");
111217 return -ENOMEM;
111218diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
111219index 787b0d6..ab6c0ba 100644
111220--- a/net/ipv4/ping.c
111221+++ b/net/ipv4/ping.c
111222@@ -59,7 +59,7 @@ struct ping_table {
111223 };
111224
111225 static struct ping_table ping_table;
111226-struct pingv6_ops pingv6_ops;
111227+struct pingv6_ops *pingv6_ops;
111228 EXPORT_SYMBOL_GPL(pingv6_ops);
111229
111230 static u16 ping_port_rover;
111231@@ -359,7 +359,7 @@ static int ping_check_bind_addr(struct sock *sk, struct inet_sock *isk,
111232 return -ENODEV;
111233 }
111234 }
111235- has_addr = pingv6_ops.ipv6_chk_addr(net, &addr->sin6_addr, dev,
111236+ has_addr = pingv6_ops->ipv6_chk_addr(net, &addr->sin6_addr, dev,
111237 scoped);
111238 rcu_read_unlock();
111239
111240@@ -567,7 +567,7 @@ void ping_err(struct sk_buff *skb, int offset, u32 info)
111241 }
111242 #if IS_ENABLED(CONFIG_IPV6)
111243 } else if (skb->protocol == htons(ETH_P_IPV6)) {
111244- harderr = pingv6_ops.icmpv6_err_convert(type, code, &err);
111245+ harderr = pingv6_ops->icmpv6_err_convert(type, code, &err);
111246 #endif
111247 }
111248
111249@@ -585,7 +585,7 @@ void ping_err(struct sk_buff *skb, int offset, u32 info)
111250 info, (u8 *)icmph);
111251 #if IS_ENABLED(CONFIG_IPV6)
111252 } else if (family == AF_INET6) {
111253- pingv6_ops.ipv6_icmp_error(sk, skb, err, 0,
111254+ pingv6_ops->ipv6_icmp_error(sk, skb, err, 0,
111255 info, (u8 *)icmph);
111256 #endif
111257 }
111258@@ -919,10 +919,10 @@ int ping_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
111259 }
111260
111261 if (inet6_sk(sk)->rxopt.all)
111262- pingv6_ops.ip6_datagram_recv_common_ctl(sk, msg, skb);
111263+ pingv6_ops->ip6_datagram_recv_common_ctl(sk, msg, skb);
111264 if (skb->protocol == htons(ETH_P_IPV6) &&
111265 inet6_sk(sk)->rxopt.all)
111266- pingv6_ops.ip6_datagram_recv_specific_ctl(sk, msg, skb);
111267+ pingv6_ops->ip6_datagram_recv_specific_ctl(sk, msg, skb);
111268 else if (skb->protocol == htons(ETH_P_IP) && isk->cmsg_flags)
111269 ip_cmsg_recv(msg, skb);
111270 #endif
111271@@ -1117,7 +1117,7 @@ static void ping_v4_format_sock(struct sock *sp, struct seq_file *f,
111272 from_kuid_munged(seq_user_ns(f), sock_i_uid(sp)),
111273 0, sock_i_ino(sp),
111274 atomic_read(&sp->sk_refcnt), sp,
111275- atomic_read(&sp->sk_drops));
111276+ atomic_read_unchecked(&sp->sk_drops));
111277 }
111278
111279 static int ping_v4_seq_show(struct seq_file *seq, void *v)
111280diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
111281index f027a70..2e64edc 100644
111282--- a/net/ipv4/raw.c
111283+++ b/net/ipv4/raw.c
111284@@ -324,7 +324,7 @@ static int raw_rcv_skb(struct sock *sk, struct sk_buff *skb)
111285 int raw_rcv(struct sock *sk, struct sk_buff *skb)
111286 {
111287 if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb)) {
111288- atomic_inc(&sk->sk_drops);
111289+ atomic_inc_unchecked(&sk->sk_drops);
111290 kfree_skb(skb);
111291 return NET_RX_DROP;
111292 }
111293@@ -773,16 +773,20 @@ static int raw_init(struct sock *sk)
111294
111295 static int raw_seticmpfilter(struct sock *sk, char __user *optval, int optlen)
111296 {
111297+ struct icmp_filter filter;
111298+
111299 if (optlen > sizeof(struct icmp_filter))
111300 optlen = sizeof(struct icmp_filter);
111301- if (copy_from_user(&raw_sk(sk)->filter, optval, optlen))
111302+ if (copy_from_user(&filter, optval, optlen))
111303 return -EFAULT;
111304+ raw_sk(sk)->filter = filter;
111305 return 0;
111306 }
111307
111308 static int raw_geticmpfilter(struct sock *sk, char __user *optval, int __user *optlen)
111309 {
111310 int len, ret = -EFAULT;
111311+ struct icmp_filter filter;
111312
111313 if (get_user(len, optlen))
111314 goto out;
111315@@ -792,8 +796,8 @@ static int raw_geticmpfilter(struct sock *sk, char __user *optval, int __user *o
111316 if (len > sizeof(struct icmp_filter))
111317 len = sizeof(struct icmp_filter);
111318 ret = -EFAULT;
111319- if (put_user(len, optlen) ||
111320- copy_to_user(optval, &raw_sk(sk)->filter, len))
111321+ filter = raw_sk(sk)->filter;
111322+ if (put_user(len, optlen) || len > sizeof filter || copy_to_user(optval, &filter, len))
111323 goto out;
111324 ret = 0;
111325 out: return ret;
111326@@ -1022,7 +1026,7 @@ static void raw_sock_seq_show(struct seq_file *seq, struct sock *sp, int i)
111327 0, 0L, 0,
111328 from_kuid_munged(seq_user_ns(seq), sock_i_uid(sp)),
111329 0, sock_i_ino(sp),
111330- atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
111331+ atomic_read(&sp->sk_refcnt), sp, atomic_read_unchecked(&sp->sk_drops));
111332 }
111333
111334 static int raw_seq_show(struct seq_file *seq, void *v)
111335diff --git a/net/ipv4/route.c b/net/ipv4/route.c
111336index 20fc020..3ba426f 100644
111337--- a/net/ipv4/route.c
111338+++ b/net/ipv4/route.c
111339@@ -228,7 +228,7 @@ static const struct seq_operations rt_cache_seq_ops = {
111340
111341 static int rt_cache_seq_open(struct inode *inode, struct file *file)
111342 {
111343- return seq_open(file, &rt_cache_seq_ops);
111344+ return seq_open_restrict(file, &rt_cache_seq_ops);
111345 }
111346
111347 static const struct file_operations rt_cache_seq_fops = {
111348@@ -319,7 +319,7 @@ static const struct seq_operations rt_cpu_seq_ops = {
111349
111350 static int rt_cpu_seq_open(struct inode *inode, struct file *file)
111351 {
111352- return seq_open(file, &rt_cpu_seq_ops);
111353+ return seq_open_restrict(file, &rt_cpu_seq_ops);
111354 }
111355
111356 static const struct file_operations rt_cpu_seq_fops = {
111357@@ -357,7 +357,7 @@ static int rt_acct_proc_show(struct seq_file *m, void *v)
111358
111359 static int rt_acct_proc_open(struct inode *inode, struct file *file)
111360 {
111361- return single_open(file, rt_acct_proc_show, NULL);
111362+ return single_open_restrict(file, rt_acct_proc_show, NULL);
111363 }
111364
111365 static const struct file_operations rt_acct_proc_fops = {
111366@@ -459,11 +459,11 @@ static struct neighbour *ipv4_neigh_lookup(const struct dst_entry *dst,
111367
111368 #define IP_IDENTS_SZ 2048u
111369 struct ip_ident_bucket {
111370- atomic_t id;
111371+ atomic_unchecked_t id;
111372 u32 stamp32;
111373 };
111374
111375-static struct ip_ident_bucket *ip_idents __read_mostly;
111376+static struct ip_ident_bucket ip_idents[IP_IDENTS_SZ] __read_mostly;
111377
111378 /* In order to protect privacy, we add a perturbation to identifiers
111379 * if one generator is seldom used. This makes hard for an attacker
111380@@ -479,7 +479,7 @@ u32 ip_idents_reserve(u32 hash, int segs)
111381 if (old != now && cmpxchg(&bucket->stamp32, old, now) == old)
111382 delta = prandom_u32_max(now - old);
111383
111384- return atomic_add_return(segs + delta, &bucket->id) - segs;
111385+ return atomic_add_return_unchecked(segs + delta, &bucket->id) - segs;
111386 }
111387 EXPORT_SYMBOL(ip_idents_reserve);
111388
111389@@ -2639,34 +2639,34 @@ static struct ctl_table ipv4_route_flush_table[] = {
111390 .maxlen = sizeof(int),
111391 .mode = 0200,
111392 .proc_handler = ipv4_sysctl_rtcache_flush,
111393+ .extra1 = &init_net,
111394 },
111395 { },
111396 };
111397
111398 static __net_init int sysctl_route_net_init(struct net *net)
111399 {
111400- struct ctl_table *tbl;
111401+ ctl_table_no_const *tbl = NULL;
111402
111403- tbl = ipv4_route_flush_table;
111404 if (!net_eq(net, &init_net)) {
111405- tbl = kmemdup(tbl, sizeof(ipv4_route_flush_table), GFP_KERNEL);
111406+ tbl = kmemdup(ipv4_route_flush_table, sizeof(ipv4_route_flush_table), GFP_KERNEL);
111407 if (tbl == NULL)
111408 goto err_dup;
111409
111410 /* Don't export sysctls to unprivileged users */
111411 if (net->user_ns != &init_user_ns)
111412 tbl[0].procname = NULL;
111413- }
111414- tbl[0].extra1 = net;
111415+ tbl[0].extra1 = net;
111416+ net->ipv4.route_hdr = register_net_sysctl(net, "net/ipv4/route", tbl);
111417+ } else
111418+ net->ipv4.route_hdr = register_net_sysctl(net, "net/ipv4/route", ipv4_route_flush_table);
111419
111420- net->ipv4.route_hdr = register_net_sysctl(net, "net/ipv4/route", tbl);
111421 if (net->ipv4.route_hdr == NULL)
111422 goto err_reg;
111423 return 0;
111424
111425 err_reg:
111426- if (tbl != ipv4_route_flush_table)
111427- kfree(tbl);
111428+ kfree(tbl);
111429 err_dup:
111430 return -ENOMEM;
111431 }
111432@@ -2689,8 +2689,8 @@ static __net_initdata struct pernet_operations sysctl_route_ops = {
111433
111434 static __net_init int rt_genid_init(struct net *net)
111435 {
111436- atomic_set(&net->ipv4.rt_genid, 0);
111437- atomic_set(&net->fnhe_genid, 0);
111438+ atomic_set_unchecked(&net->ipv4.rt_genid, 0);
111439+ atomic_set_unchecked(&net->fnhe_genid, 0);
111440 get_random_bytes(&net->ipv4.dev_addr_genid,
111441 sizeof(net->ipv4.dev_addr_genid));
111442 return 0;
111443@@ -2734,11 +2734,7 @@ int __init ip_rt_init(void)
111444 int rc = 0;
111445 int cpu;
111446
111447- ip_idents = kmalloc(IP_IDENTS_SZ * sizeof(*ip_idents), GFP_KERNEL);
111448- if (!ip_idents)
111449- panic("IP: failed to allocate ip_idents\n");
111450-
111451- prandom_bytes(ip_idents, IP_IDENTS_SZ * sizeof(*ip_idents));
111452+ prandom_bytes(ip_idents, sizeof(ip_idents));
111453
111454 for_each_possible_cpu(cpu) {
111455 struct uncached_list *ul = &per_cpu(rt_uncached_list, cpu);
111456diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c
111457index d151539..5f5e247 100644
111458--- a/net/ipv4/sysctl_net_ipv4.c
111459+++ b/net/ipv4/sysctl_net_ipv4.c
111460@@ -60,7 +60,7 @@ static int ipv4_local_port_range(struct ctl_table *table, int write,
111461 container_of(table->data, struct net, ipv4.ip_local_ports.range);
111462 int ret;
111463 int range[2];
111464- struct ctl_table tmp = {
111465+ ctl_table_no_const tmp = {
111466 .data = &range,
111467 .maxlen = sizeof(range),
111468 .mode = table->mode,
111469@@ -118,7 +118,7 @@ static int ipv4_ping_group_range(struct ctl_table *table, int write,
111470 int ret;
111471 gid_t urange[2];
111472 kgid_t low, high;
111473- struct ctl_table tmp = {
111474+ ctl_table_no_const tmp = {
111475 .data = &urange,
111476 .maxlen = sizeof(urange),
111477 .mode = table->mode,
111478@@ -149,7 +149,7 @@ static int proc_tcp_congestion_control(struct ctl_table *ctl, int write,
111479 void __user *buffer, size_t *lenp, loff_t *ppos)
111480 {
111481 char val[TCP_CA_NAME_MAX];
111482- struct ctl_table tbl = {
111483+ ctl_table_no_const tbl = {
111484 .data = val,
111485 .maxlen = TCP_CA_NAME_MAX,
111486 };
111487@@ -168,7 +168,7 @@ static int proc_tcp_available_congestion_control(struct ctl_table *ctl,
111488 void __user *buffer, size_t *lenp,
111489 loff_t *ppos)
111490 {
111491- struct ctl_table tbl = { .maxlen = TCP_CA_BUF_MAX, };
111492+ ctl_table_no_const tbl = { .maxlen = TCP_CA_BUF_MAX, };
111493 int ret;
111494
111495 tbl.data = kmalloc(tbl.maxlen, GFP_USER);
111496@@ -185,7 +185,7 @@ static int proc_allowed_congestion_control(struct ctl_table *ctl,
111497 void __user *buffer, size_t *lenp,
111498 loff_t *ppos)
111499 {
111500- struct ctl_table tbl = { .maxlen = TCP_CA_BUF_MAX };
111501+ ctl_table_no_const tbl = { .maxlen = TCP_CA_BUF_MAX };
111502 int ret;
111503
111504 tbl.data = kmalloc(tbl.maxlen, GFP_USER);
111505@@ -204,7 +204,7 @@ static int proc_tcp_fastopen_key(struct ctl_table *ctl, int write,
111506 void __user *buffer, size_t *lenp,
111507 loff_t *ppos)
111508 {
111509- struct ctl_table tbl = { .maxlen = (TCP_FASTOPEN_KEY_LENGTH * 2 + 10) };
111510+ ctl_table_no_const tbl = { .maxlen = (TCP_FASTOPEN_KEY_LENGTH * 2 + 10) };
111511 struct tcp_fastopen_context *ctxt;
111512 int ret;
111513 u32 user_key[4]; /* 16 bytes, matching TCP_FASTOPEN_KEY_LENGTH */
111514@@ -888,13 +888,12 @@ static struct ctl_table ipv4_net_table[] = {
111515
111516 static __net_init int ipv4_sysctl_init_net(struct net *net)
111517 {
111518- struct ctl_table *table;
111519+ ctl_table_no_const *table = NULL;
111520
111521- table = ipv4_net_table;
111522 if (!net_eq(net, &init_net)) {
111523 int i;
111524
111525- table = kmemdup(table, sizeof(ipv4_net_table), GFP_KERNEL);
111526+ table = kmemdup(ipv4_net_table, sizeof(ipv4_net_table), GFP_KERNEL);
111527 if (table == NULL)
111528 goto err_alloc;
111529
111530@@ -903,7 +902,10 @@ static __net_init int ipv4_sysctl_init_net(struct net *net)
111531 table[i].data += (void *)net - (void *)&init_net;
111532 }
111533
111534- net->ipv4.ipv4_hdr = register_net_sysctl(net, "net/ipv4", table);
111535+ if (!net_eq(net, &init_net))
111536+ net->ipv4.ipv4_hdr = register_net_sysctl(net, "net/ipv4", table);
111537+ else
111538+ net->ipv4.ipv4_hdr = register_net_sysctl(net, "net/ipv4", ipv4_net_table);
111539 if (net->ipv4.ipv4_hdr == NULL)
111540 goto err_reg;
111541
111542diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
111543index 995a225..e1e9183 100644
111544--- a/net/ipv4/tcp.c
111545+++ b/net/ipv4/tcp.c
111546@@ -520,8 +520,10 @@ unsigned int tcp_poll(struct file *file, struct socket *sock, poll_table *wait)
111547
111548 /* Race breaker. If space is freed after
111549 * wspace test but before the flags are set,
111550- * IO signal will be lost.
111551+ * IO signal will be lost. Memory barrier
111552+ * pairs with the input side.
111553 */
111554+ smp_mb__after_atomic();
111555 if (sk_stream_is_writeable(sk))
111556 mask |= POLLOUT | POLLWRNORM;
111557 }
111558diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
111559index f501ac04..0c5a1b2 100644
111560--- a/net/ipv4/tcp_input.c
111561+++ b/net/ipv4/tcp_input.c
111562@@ -767,7 +767,7 @@ static void tcp_update_pacing_rate(struct sock *sk)
111563 * without any lock. We want to make sure compiler wont store
111564 * intermediate values in this location.
111565 */
111566- ACCESS_ONCE(sk->sk_pacing_rate) = min_t(u64, rate,
111567+ ACCESS_ONCE_RW(sk->sk_pacing_rate) = min_t(u64, rate,
111568 sk->sk_max_pacing_rate);
111569 }
111570
111571@@ -4541,7 +4541,7 @@ static struct sk_buff *tcp_collapse_one(struct sock *sk, struct sk_buff *skb,
111572 * simplifies code)
111573 */
111574 static void
111575-tcp_collapse(struct sock *sk, struct sk_buff_head *list,
111576+__intentional_overflow(5,6) tcp_collapse(struct sock *sk, struct sk_buff_head *list,
111577 struct sk_buff *head, struct sk_buff *tail,
111578 u32 start, u32 end)
111579 {
111580@@ -4799,6 +4799,8 @@ static void tcp_check_space(struct sock *sk)
111581 {
111582 if (sock_flag(sk, SOCK_QUEUE_SHRUNK)) {
111583 sock_reset_flag(sk, SOCK_QUEUE_SHRUNK);
111584+ /* pairs with tcp_poll() */
111585+ smp_mb__after_atomic();
111586 if (sk->sk_socket &&
111587 test_bit(SOCK_NOSPACE, &sk->sk_socket->flags))
111588 tcp_new_space(sk);
111589@@ -5525,6 +5527,7 @@ discard:
111590 tcp_paws_reject(&tp->rx_opt, 0))
111591 goto discard_and_undo;
111592
111593+#ifndef CONFIG_GRKERNSEC_NO_SIMULT_CONNECT
111594 if (th->syn) {
111595 /* We see SYN without ACK. It is attempt of
111596 * simultaneous connect with crossed SYNs.
111597@@ -5575,6 +5578,7 @@ discard:
111598 goto discard;
111599 #endif
111600 }
111601+#endif
111602 /* "fifth, if neither of the SYN or RST bits is set then
111603 * drop the segment and return."
111604 */
111605@@ -5621,7 +5625,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
111606 goto discard;
111607
111608 if (th->syn) {
111609- if (th->fin)
111610+ if (th->fin || th->urg || th->psh)
111611 goto discard;
111612 if (icsk->icsk_af_ops->conn_request(sk, skb) < 0)
111613 return 1;
111614diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
111615index f1756ee..8908cb0 100644
111616--- a/net/ipv4/tcp_ipv4.c
111617+++ b/net/ipv4/tcp_ipv4.c
111618@@ -89,6 +89,10 @@ int sysctl_tcp_tw_reuse __read_mostly;
111619 int sysctl_tcp_low_latency __read_mostly;
111620 EXPORT_SYMBOL(sysctl_tcp_low_latency);
111621
111622+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
111623+extern int grsec_enable_blackhole;
111624+#endif
111625+
111626 #ifdef CONFIG_TCP_MD5SIG
111627 static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
111628 __be32 daddr, __be32 saddr, const struct tcphdr *th);
111629@@ -1475,6 +1479,9 @@ int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
111630 return 0;
111631
111632 reset:
111633+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
111634+ if (!grsec_enable_blackhole)
111635+#endif
111636 tcp_v4_send_reset(rsk, skb);
111637 discard:
111638 kfree_skb(skb);
111639@@ -1639,12 +1646,19 @@ int tcp_v4_rcv(struct sk_buff *skb)
111640 TCP_SKB_CB(skb)->sacked = 0;
111641
111642 sk = __inet_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
111643- if (!sk)
111644+ if (!sk) {
111645+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
111646+ ret = 1;
111647+#endif
111648 goto no_tcp_socket;
111649-
111650+ }
111651 process:
111652- if (sk->sk_state == TCP_TIME_WAIT)
111653+ if (sk->sk_state == TCP_TIME_WAIT) {
111654+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
111655+ ret = 2;
111656+#endif
111657 goto do_time_wait;
111658+ }
111659
111660 if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
111661 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
111662@@ -1700,6 +1714,10 @@ csum_error:
111663 bad_packet:
111664 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
111665 } else {
111666+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
111667+ if (!grsec_enable_blackhole || (ret == 1 &&
111668+ (skb->dev->flags & IFF_LOOPBACK)))
111669+#endif
111670 tcp_v4_send_reset(NULL, skb);
111671 }
111672
111673diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
111674index dd11ac7..c0872da 100644
111675--- a/net/ipv4/tcp_minisocks.c
111676+++ b/net/ipv4/tcp_minisocks.c
111677@@ -27,6 +27,10 @@
111678 #include <net/inet_common.h>
111679 #include <net/xfrm.h>
111680
111681+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
111682+extern int grsec_enable_blackhole;
111683+#endif
111684+
111685 int sysctl_tcp_syncookies __read_mostly = 1;
111686 EXPORT_SYMBOL(sysctl_tcp_syncookies);
111687
111688@@ -785,7 +789,10 @@ embryonic_reset:
111689 * avoid becoming vulnerable to outside attack aiming at
111690 * resetting legit local connections.
111691 */
111692- req->rsk_ops->send_reset(sk, skb);
111693+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
111694+ if (!grsec_enable_blackhole)
111695+#endif
111696+ req->rsk_ops->send_reset(sk, skb);
111697 } else if (fastopen) { /* received a valid RST pkt */
111698 reqsk_fastopen_remove(sk, req, true);
111699 tcp_reset(sk);
111700diff --git a/net/ipv4/tcp_probe.c b/net/ipv4/tcp_probe.c
111701index ebf5ff5..4d1ff32 100644
111702--- a/net/ipv4/tcp_probe.c
111703+++ b/net/ipv4/tcp_probe.c
111704@@ -236,7 +236,7 @@ static ssize_t tcpprobe_read(struct file *file, char __user *buf,
111705 if (cnt + width >= len)
111706 break;
111707
111708- if (copy_to_user(buf + cnt, tbuf, width))
111709+ if (width > sizeof tbuf || copy_to_user(buf + cnt, tbuf, width))
111710 return -EFAULT;
111711 cnt += width;
111712 }
111713diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
111714index 0732b78..a82bdc6 100644
111715--- a/net/ipv4/tcp_timer.c
111716+++ b/net/ipv4/tcp_timer.c
111717@@ -22,6 +22,10 @@
111718 #include <linux/gfp.h>
111719 #include <net/tcp.h>
111720
111721+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
111722+extern int grsec_lastack_retries;
111723+#endif
111724+
111725 int sysctl_tcp_syn_retries __read_mostly = TCP_SYN_RETRIES;
111726 int sysctl_tcp_synack_retries __read_mostly = TCP_SYNACK_RETRIES;
111727 int sysctl_tcp_keepalive_time __read_mostly = TCP_KEEPALIVE_TIME;
111728@@ -194,6 +198,13 @@ static int tcp_write_timeout(struct sock *sk)
111729 }
111730 }
111731
111732+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
111733+ if ((sk->sk_state == TCP_LAST_ACK) &&
111734+ (grsec_lastack_retries > 0) &&
111735+ (grsec_lastack_retries < retry_until))
111736+ retry_until = grsec_lastack_retries;
111737+#endif
111738+
111739 if (retransmits_timed_out(sk, retry_until,
111740 syn_set ? 0 : icsk->icsk_user_timeout, syn_set)) {
111741 /* Has it gone just too far? */
111742diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
111743index 97ef1f8b..e446c33 100644
111744--- a/net/ipv4/udp.c
111745+++ b/net/ipv4/udp.c
111746@@ -87,6 +87,7 @@
111747 #include <linux/types.h>
111748 #include <linux/fcntl.h>
111749 #include <linux/module.h>
111750+#include <linux/security.h>
111751 #include <linux/socket.h>
111752 #include <linux/sockios.h>
111753 #include <linux/igmp.h>
111754@@ -114,6 +115,10 @@
111755 #include <net/busy_poll.h>
111756 #include "udp_impl.h"
111757
111758+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
111759+extern int grsec_enable_blackhole;
111760+#endif
111761+
111762 struct udp_table udp_table __read_mostly;
111763 EXPORT_SYMBOL(udp_table);
111764
111765@@ -608,6 +613,9 @@ static inline bool __udp_is_mcast_sock(struct net *net, struct sock *sk,
111766 return true;
111767 }
111768
111769+extern int gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb);
111770+extern int gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr);
111771+
111772 /*
111773 * This routine is called by the ICMP module when it gets some
111774 * sort of error condition. If err < 0 then the socket should
111775@@ -945,9 +953,18 @@ int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
111776 dport = usin->sin_port;
111777 if (dport == 0)
111778 return -EINVAL;
111779+
111780+ err = gr_search_udp_sendmsg(sk, usin);
111781+ if (err)
111782+ return err;
111783 } else {
111784 if (sk->sk_state != TCP_ESTABLISHED)
111785 return -EDESTADDRREQ;
111786+
111787+ err = gr_search_udp_sendmsg(sk, NULL);
111788+ if (err)
111789+ return err;
111790+
111791 daddr = inet->inet_daddr;
111792 dport = inet->inet_dport;
111793 /* Open fast path for connected socket.
111794@@ -1195,7 +1212,7 @@ static unsigned int first_packet_length(struct sock *sk)
111795 IS_UDPLITE(sk));
111796 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
111797 IS_UDPLITE(sk));
111798- atomic_inc(&sk->sk_drops);
111799+ atomic_inc_unchecked(&sk->sk_drops);
111800 __skb_unlink(skb, rcvq);
111801 __skb_queue_tail(&list_kill, skb);
111802 }
111803@@ -1275,6 +1292,10 @@ try_again:
111804 if (!skb)
111805 goto out;
111806
111807+ err = gr_search_udp_recvmsg(sk, skb);
111808+ if (err)
111809+ goto out_free;
111810+
111811 ulen = skb->len - sizeof(struct udphdr);
111812 copied = len;
111813 if (copied > ulen)
111814@@ -1307,7 +1328,7 @@ try_again:
111815 if (unlikely(err)) {
111816 trace_kfree_skb(skb, udp_recvmsg);
111817 if (!peeked) {
111818- atomic_inc(&sk->sk_drops);
111819+ atomic_inc_unchecked(&sk->sk_drops);
111820 UDP_INC_STATS_USER(sock_net(sk),
111821 UDP_MIB_INERRORS, is_udplite);
111822 }
111823@@ -1605,7 +1626,7 @@ csum_error:
111824 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_CSUMERRORS, is_udplite);
111825 drop:
111826 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
111827- atomic_inc(&sk->sk_drops);
111828+ atomic_inc_unchecked(&sk->sk_drops);
111829 kfree_skb(skb);
111830 return -1;
111831 }
111832@@ -1624,7 +1645,7 @@ static void flush_stack(struct sock **stack, unsigned int count,
111833 skb1 = (i == final) ? skb : skb_clone(skb, GFP_ATOMIC);
111834
111835 if (!skb1) {
111836- atomic_inc(&sk->sk_drops);
111837+ atomic_inc_unchecked(&sk->sk_drops);
111838 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS,
111839 IS_UDPLITE(sk));
111840 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
111841@@ -1830,6 +1851,9 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
111842 goto csum_error;
111843
111844 UDP_INC_STATS_BH(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
111845+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
111846+ if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
111847+#endif
111848 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
111849
111850 /*
111851@@ -2416,7 +2440,7 @@ static void udp4_format_sock(struct sock *sp, struct seq_file *f,
111852 from_kuid_munged(seq_user_ns(f), sock_i_uid(sp)),
111853 0, sock_i_ino(sp),
111854 atomic_read(&sp->sk_refcnt), sp,
111855- atomic_read(&sp->sk_drops));
111856+ atomic_read_unchecked(&sp->sk_drops));
111857 }
111858
111859 int udp4_seq_show(struct seq_file *seq, void *v)
111860diff --git a/net/ipv4/xfrm4_policy.c b/net/ipv4/xfrm4_policy.c
111861index 6156f68..d6ab46d 100644
111862--- a/net/ipv4/xfrm4_policy.c
111863+++ b/net/ipv4/xfrm4_policy.c
111864@@ -186,11 +186,11 @@ _decode_session4(struct sk_buff *skb, struct flowi *fl, int reverse)
111865 fl4->flowi4_tos = iph->tos;
111866 }
111867
111868-static inline int xfrm4_garbage_collect(struct dst_ops *ops)
111869+static int xfrm4_garbage_collect(struct dst_ops *ops)
111870 {
111871 struct net *net = container_of(ops, struct net, xfrm.xfrm4_dst_ops);
111872
111873- xfrm4_policy_afinfo.garbage_collect(net);
111874+ xfrm_garbage_collect_deferred(net);
111875 return (dst_entries_get_slow(ops) > ops->gc_thresh * 2);
111876 }
111877
111878@@ -269,19 +269,18 @@ static struct ctl_table xfrm4_policy_table[] = {
111879
111880 static int __net_init xfrm4_net_init(struct net *net)
111881 {
111882- struct ctl_table *table;
111883+ ctl_table_no_const *table = NULL;
111884 struct ctl_table_header *hdr;
111885
111886- table = xfrm4_policy_table;
111887 if (!net_eq(net, &init_net)) {
111888- table = kmemdup(table, sizeof(xfrm4_policy_table), GFP_KERNEL);
111889+ table = kmemdup(xfrm4_policy_table, sizeof(xfrm4_policy_table), GFP_KERNEL);
111890 if (!table)
111891 goto err_alloc;
111892
111893 table[0].data = &net->xfrm.xfrm4_dst_ops.gc_thresh;
111894- }
111895-
111896- hdr = register_net_sysctl(net, "net/ipv4", table);
111897+ hdr = register_net_sysctl(net, "net/ipv4", table);
111898+ } else
111899+ hdr = register_net_sysctl(net, "net/ipv4", xfrm4_policy_table);
111900 if (!hdr)
111901 goto err_reg;
111902
111903@@ -289,8 +288,7 @@ static int __net_init xfrm4_net_init(struct net *net)
111904 return 0;
111905
111906 err_reg:
111907- if (!net_eq(net, &init_net))
111908- kfree(table);
111909+ kfree(table);
111910 err_alloc:
111911 return -ENOMEM;
111912 }
111913diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
111914index b603002..0de5c88 100644
111915--- a/net/ipv6/addrconf.c
111916+++ b/net/ipv6/addrconf.c
111917@@ -171,7 +171,7 @@ static struct ipv6_devconf ipv6_devconf __read_mostly = {
111918 .hop_limit = IPV6_DEFAULT_HOPLIMIT,
111919 .mtu6 = IPV6_MIN_MTU,
111920 .accept_ra = 1,
111921- .accept_redirects = 1,
111922+ .accept_redirects = 0,
111923 .autoconf = 1,
111924 .force_mld_version = 0,
111925 .mldv1_unsolicited_report_interval = 10 * HZ,
111926@@ -209,7 +209,7 @@ static struct ipv6_devconf ipv6_devconf_dflt __read_mostly = {
111927 .hop_limit = IPV6_DEFAULT_HOPLIMIT,
111928 .mtu6 = IPV6_MIN_MTU,
111929 .accept_ra = 1,
111930- .accept_redirects = 1,
111931+ .accept_redirects = 0,
111932 .autoconf = 1,
111933 .force_mld_version = 0,
111934 .mldv1_unsolicited_report_interval = 10 * HZ,
111935@@ -607,7 +607,7 @@ static int inet6_netconf_dump_devconf(struct sk_buff *skb,
111936 idx = 0;
111937 head = &net->dev_index_head[h];
111938 rcu_read_lock();
111939- cb->seq = atomic_read(&net->ipv6.dev_addr_genid) ^
111940+ cb->seq = atomic_read_unchecked(&net->ipv6.dev_addr_genid) ^
111941 net->dev_base_seq;
111942 hlist_for_each_entry_rcu(dev, head, index_hlist) {
111943 if (idx < s_idx)
111944@@ -2438,7 +2438,7 @@ int addrconf_set_dstaddr(struct net *net, void __user *arg)
111945 p.iph.ihl = 5;
111946 p.iph.protocol = IPPROTO_IPV6;
111947 p.iph.ttl = 64;
111948- ifr.ifr_ifru.ifru_data = (__force void __user *)&p;
111949+ ifr.ifr_ifru.ifru_data = (void __force_user *)&p;
111950
111951 if (ops->ndo_do_ioctl) {
111952 mm_segment_t oldfs = get_fs();
111953@@ -3587,16 +3587,23 @@ static const struct file_operations if6_fops = {
111954 .release = seq_release_net,
111955 };
111956
111957+extern void register_ipv6_seq_ops_addr(struct seq_operations *addr);
111958+extern void unregister_ipv6_seq_ops_addr(void);
111959+
111960 static int __net_init if6_proc_net_init(struct net *net)
111961 {
111962- if (!proc_create("if_inet6", S_IRUGO, net->proc_net, &if6_fops))
111963+ register_ipv6_seq_ops_addr(&if6_seq_ops);
111964+ if (!proc_create("if_inet6", S_IRUGO, net->proc_net, &if6_fops)) {
111965+ unregister_ipv6_seq_ops_addr();
111966 return -ENOMEM;
111967+ }
111968 return 0;
111969 }
111970
111971 static void __net_exit if6_proc_net_exit(struct net *net)
111972 {
111973 remove_proc_entry("if_inet6", net->proc_net);
111974+ unregister_ipv6_seq_ops_addr();
111975 }
111976
111977 static struct pernet_operations if6_proc_net_ops = {
111978@@ -4215,7 +4222,7 @@ static int inet6_dump_addr(struct sk_buff *skb, struct netlink_callback *cb,
111979 s_ip_idx = ip_idx = cb->args[2];
111980
111981 rcu_read_lock();
111982- cb->seq = atomic_read(&net->ipv6.dev_addr_genid) ^ net->dev_base_seq;
111983+ cb->seq = atomic_read_unchecked(&net->ipv6.dev_addr_genid) ^ net->dev_base_seq;
111984 for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) {
111985 idx = 0;
111986 head = &net->dev_index_head[h];
111987@@ -4864,7 +4871,7 @@ static void __ipv6_ifa_notify(int event, struct inet6_ifaddr *ifp)
111988 rt_genid_bump_ipv6(net);
111989 break;
111990 }
111991- atomic_inc(&net->ipv6.dev_addr_genid);
111992+ atomic_inc_unchecked(&net->ipv6.dev_addr_genid);
111993 }
111994
111995 static void ipv6_ifa_notify(int event, struct inet6_ifaddr *ifp)
111996@@ -4884,7 +4891,7 @@ int addrconf_sysctl_forward(struct ctl_table *ctl, int write,
111997 int *valp = ctl->data;
111998 int val = *valp;
111999 loff_t pos = *ppos;
112000- struct ctl_table lctl;
112001+ ctl_table_no_const lctl;
112002 int ret;
112003
112004 /*
112005@@ -4909,7 +4916,7 @@ int addrconf_sysctl_mtu(struct ctl_table *ctl, int write,
112006 {
112007 struct inet6_dev *idev = ctl->extra1;
112008 int min_mtu = IPV6_MIN_MTU;
112009- struct ctl_table lctl;
112010+ ctl_table_no_const lctl;
112011
112012 lctl = *ctl;
112013 lctl.extra1 = &min_mtu;
112014@@ -4984,7 +4991,7 @@ int addrconf_sysctl_disable(struct ctl_table *ctl, int write,
112015 int *valp = ctl->data;
112016 int val = *valp;
112017 loff_t pos = *ppos;
112018- struct ctl_table lctl;
112019+ ctl_table_no_const lctl;
112020 int ret;
112021
112022 /*
112023diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c
112024index e8c4400..a4cd5da 100644
112025--- a/net/ipv6/af_inet6.c
112026+++ b/net/ipv6/af_inet6.c
112027@@ -766,7 +766,7 @@ static int __net_init inet6_net_init(struct net *net)
112028 net->ipv6.sysctl.icmpv6_time = 1*HZ;
112029 net->ipv6.sysctl.flowlabel_consistency = 1;
112030 net->ipv6.sysctl.auto_flowlabels = 0;
112031- atomic_set(&net->ipv6.fib6_sernum, 1);
112032+ atomic_set_unchecked(&net->ipv6.fib6_sernum, 1);
112033
112034 err = ipv6_init_mibs(net);
112035 if (err)
112036diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c
112037index ace8dac..bd6942d 100644
112038--- a/net/ipv6/datagram.c
112039+++ b/net/ipv6/datagram.c
112040@@ -957,5 +957,5 @@ void ip6_dgram_sock_seq_show(struct seq_file *seq, struct sock *sp,
112041 0,
112042 sock_i_ino(sp),
112043 atomic_read(&sp->sk_refcnt), sp,
112044- atomic_read(&sp->sk_drops));
112045+ atomic_read_unchecked(&sp->sk_drops));
112046 }
112047diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c
112048index a5e9519..16b7412 100644
112049--- a/net/ipv6/icmp.c
112050+++ b/net/ipv6/icmp.c
112051@@ -1005,7 +1005,7 @@ static struct ctl_table ipv6_icmp_table_template[] = {
112052
112053 struct ctl_table * __net_init ipv6_icmp_sysctl_init(struct net *net)
112054 {
112055- struct ctl_table *table;
112056+ ctl_table_no_const *table;
112057
112058 table = kmemdup(ipv6_icmp_table_template,
112059 sizeof(ipv6_icmp_table_template),
112060diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
112061index 263ef41..88c7be8 100644
112062--- a/net/ipv6/ip6_fib.c
112063+++ b/net/ipv6/ip6_fib.c
112064@@ -99,9 +99,9 @@ static int fib6_new_sernum(struct net *net)
112065 int new, old;
112066
112067 do {
112068- old = atomic_read(&net->ipv6.fib6_sernum);
112069+ old = atomic_read_unchecked(&net->ipv6.fib6_sernum);
112070 new = old < INT_MAX ? old + 1 : 1;
112071- } while (atomic_cmpxchg(&net->ipv6.fib6_sernum,
112072+ } while (atomic_cmpxchg_unchecked(&net->ipv6.fib6_sernum,
112073 old, new) != old);
112074 return new;
112075 }
112076diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
112077index bc28b7d..a08feea 100644
112078--- a/net/ipv6/ip6_gre.c
112079+++ b/net/ipv6/ip6_gre.c
112080@@ -71,8 +71,8 @@ struct ip6gre_net {
112081 struct net_device *fb_tunnel_dev;
112082 };
112083
112084-static struct rtnl_link_ops ip6gre_link_ops __read_mostly;
112085-static struct rtnl_link_ops ip6gre_tap_ops __read_mostly;
112086+static struct rtnl_link_ops ip6gre_link_ops;
112087+static struct rtnl_link_ops ip6gre_tap_ops;
112088 static int ip6gre_tunnel_init(struct net_device *dev);
112089 static void ip6gre_tunnel_setup(struct net_device *dev);
112090 static void ip6gre_tunnel_link(struct ip6gre_net *ign, struct ip6_tnl *t);
112091@@ -1289,7 +1289,7 @@ static void ip6gre_fb_tunnel_init(struct net_device *dev)
112092 }
112093
112094
112095-static struct inet6_protocol ip6gre_protocol __read_mostly = {
112096+static struct inet6_protocol ip6gre_protocol = {
112097 .handler = ip6gre_rcv,
112098 .err_handler = ip6gre_err,
112099 .flags = INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
112100@@ -1650,7 +1650,7 @@ static const struct nla_policy ip6gre_policy[IFLA_GRE_MAX + 1] = {
112101 [IFLA_GRE_FLAGS] = { .type = NLA_U32 },
112102 };
112103
112104-static struct rtnl_link_ops ip6gre_link_ops __read_mostly = {
112105+static struct rtnl_link_ops ip6gre_link_ops = {
112106 .kind = "ip6gre",
112107 .maxtype = IFLA_GRE_MAX,
112108 .policy = ip6gre_policy,
112109@@ -1665,7 +1665,7 @@ static struct rtnl_link_ops ip6gre_link_ops __read_mostly = {
112110 .get_link_net = ip6_tnl_get_link_net,
112111 };
112112
112113-static struct rtnl_link_ops ip6gre_tap_ops __read_mostly = {
112114+static struct rtnl_link_ops ip6gre_tap_ops = {
112115 .kind = "ip6gretap",
112116 .maxtype = IFLA_GRE_MAX,
112117 .policy = ip6gre_policy,
112118diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
112119index ddd94ec..b7cfefb 100644
112120--- a/net/ipv6/ip6_tunnel.c
112121+++ b/net/ipv6/ip6_tunnel.c
112122@@ -86,7 +86,7 @@ static u32 HASH(const struct in6_addr *addr1, const struct in6_addr *addr2)
112123
112124 static int ip6_tnl_dev_init(struct net_device *dev);
112125 static void ip6_tnl_dev_setup(struct net_device *dev);
112126-static struct rtnl_link_ops ip6_link_ops __read_mostly;
112127+static struct rtnl_link_ops ip6_link_ops;
112128
112129 static int ip6_tnl_net_id __read_mostly;
112130 struct ip6_tnl_net {
112131@@ -1780,7 +1780,7 @@ static const struct nla_policy ip6_tnl_policy[IFLA_IPTUN_MAX + 1] = {
112132 [IFLA_IPTUN_PROTO] = { .type = NLA_U8 },
112133 };
112134
112135-static struct rtnl_link_ops ip6_link_ops __read_mostly = {
112136+static struct rtnl_link_ops ip6_link_ops = {
112137 .kind = "ip6tnl",
112138 .maxtype = IFLA_IPTUN_MAX,
112139 .policy = ip6_tnl_policy,
112140diff --git a/net/ipv6/ip6_vti.c b/net/ipv6/ip6_vti.c
112141index 5fb9e21..92bf04b 100644
112142--- a/net/ipv6/ip6_vti.c
112143+++ b/net/ipv6/ip6_vti.c
112144@@ -62,7 +62,7 @@ static u32 HASH(const struct in6_addr *addr1, const struct in6_addr *addr2)
112145
112146 static int vti6_dev_init(struct net_device *dev);
112147 static void vti6_dev_setup(struct net_device *dev);
112148-static struct rtnl_link_ops vti6_link_ops __read_mostly;
112149+static struct rtnl_link_ops vti6_link_ops;
112150
112151 static int vti6_net_id __read_mostly;
112152 struct vti6_net {
112153@@ -1004,7 +1004,7 @@ static const struct nla_policy vti6_policy[IFLA_VTI_MAX + 1] = {
112154 [IFLA_VTI_OKEY] = { .type = NLA_U32 },
112155 };
112156
112157-static struct rtnl_link_ops vti6_link_ops __read_mostly = {
112158+static struct rtnl_link_ops vti6_link_ops = {
112159 .kind = "vti6",
112160 .maxtype = IFLA_VTI_MAX,
112161 .policy = vti6_policy,
112162diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c
112163index 8d766d9..dcdfea7 100644
112164--- a/net/ipv6/ipv6_sockglue.c
112165+++ b/net/ipv6/ipv6_sockglue.c
112166@@ -989,7 +989,7 @@ static int do_ipv6_getsockopt(struct sock *sk, int level, int optname,
112167 if (sk->sk_type != SOCK_STREAM)
112168 return -ENOPROTOOPT;
112169
112170- msg.msg_control = optval;
112171+ msg.msg_control = (void __force_kernel *)optval;
112172 msg.msg_controllen = len;
112173 msg.msg_flags = flags;
112174
112175diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c
112176index bb00c6f..16c90d7 100644
112177--- a/net/ipv6/netfilter/ip6_tables.c
112178+++ b/net/ipv6/netfilter/ip6_tables.c
112179@@ -1083,14 +1083,14 @@ static int compat_table_info(const struct xt_table_info *info,
112180 #endif
112181
112182 static int get_info(struct net *net, void __user *user,
112183- const int *len, int compat)
112184+ int len, int compat)
112185 {
112186 char name[XT_TABLE_MAXNAMELEN];
112187 struct xt_table *t;
112188 int ret;
112189
112190- if (*len != sizeof(struct ip6t_getinfo)) {
112191- duprintf("length %u != %zu\n", *len,
112192+ if (len != sizeof(struct ip6t_getinfo)) {
112193+ duprintf("length %u != %zu\n", len,
112194 sizeof(struct ip6t_getinfo));
112195 return -EINVAL;
112196 }
112197@@ -1127,7 +1127,7 @@ static int get_info(struct net *net, void __user *user,
112198 info.size = private->size;
112199 strcpy(info.name, name);
112200
112201- if (copy_to_user(user, &info, *len) != 0)
112202+ if (copy_to_user(user, &info, len) != 0)
112203 ret = -EFAULT;
112204 else
112205 ret = 0;
112206@@ -1983,7 +1983,7 @@ compat_do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
112207
112208 switch (cmd) {
112209 case IP6T_SO_GET_INFO:
112210- ret = get_info(sock_net(sk), user, len, 1);
112211+ ret = get_info(sock_net(sk), user, *len, 1);
112212 break;
112213 case IP6T_SO_GET_ENTRIES:
112214 ret = compat_get_entries(sock_net(sk), user, len);
112215@@ -2030,7 +2030,7 @@ do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
112216
112217 switch (cmd) {
112218 case IP6T_SO_GET_INFO:
112219- ret = get_info(sock_net(sk), user, len, 0);
112220+ ret = get_info(sock_net(sk), user, *len, 0);
112221 break;
112222
112223 case IP6T_SO_GET_ENTRIES:
112224diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c
112225index 6f187c8..34b367f 100644
112226--- a/net/ipv6/netfilter/nf_conntrack_reasm.c
112227+++ b/net/ipv6/netfilter/nf_conntrack_reasm.c
112228@@ -96,12 +96,11 @@ static struct ctl_table nf_ct_frag6_sysctl_table[] = {
112229
112230 static int nf_ct_frag6_sysctl_register(struct net *net)
112231 {
112232- struct ctl_table *table;
112233+ ctl_table_no_const *table = NULL;
112234 struct ctl_table_header *hdr;
112235
112236- table = nf_ct_frag6_sysctl_table;
112237 if (!net_eq(net, &init_net)) {
112238- table = kmemdup(table, sizeof(nf_ct_frag6_sysctl_table),
112239+ table = kmemdup(nf_ct_frag6_sysctl_table, sizeof(nf_ct_frag6_sysctl_table),
112240 GFP_KERNEL);
112241 if (table == NULL)
112242 goto err_alloc;
112243@@ -112,9 +111,9 @@ static int nf_ct_frag6_sysctl_register(struct net *net)
112244 table[2].data = &net->nf_frag.frags.high_thresh;
112245 table[2].extra1 = &net->nf_frag.frags.low_thresh;
112246 table[2].extra2 = &init_net.nf_frag.frags.high_thresh;
112247- }
112248-
112249- hdr = register_net_sysctl(net, "net/netfilter", table);
112250+ hdr = register_net_sysctl(net, "net/netfilter", table);
112251+ } else
112252+ hdr = register_net_sysctl(net, "net/netfilter", nf_ct_frag6_sysctl_table);
112253 if (hdr == NULL)
112254 goto err_reg;
112255
112256@@ -122,8 +121,7 @@ static int nf_ct_frag6_sysctl_register(struct net *net)
112257 return 0;
112258
112259 err_reg:
112260- if (!net_eq(net, &init_net))
112261- kfree(table);
112262+ kfree(table);
112263 err_alloc:
112264 return -ENOMEM;
112265 }
112266diff --git a/net/ipv6/ping.c b/net/ipv6/ping.c
112267index a2dfff6..1e52e6d 100644
112268--- a/net/ipv6/ping.c
112269+++ b/net/ipv6/ping.c
112270@@ -241,6 +241,24 @@ static struct pernet_operations ping_v6_net_ops = {
112271 };
112272 #endif
112273
112274+static struct pingv6_ops real_pingv6_ops = {
112275+ .ipv6_recv_error = ipv6_recv_error,
112276+ .ip6_datagram_recv_common_ctl = ip6_datagram_recv_common_ctl,
112277+ .ip6_datagram_recv_specific_ctl = ip6_datagram_recv_specific_ctl,
112278+ .icmpv6_err_convert = icmpv6_err_convert,
112279+ .ipv6_icmp_error = ipv6_icmp_error,
112280+ .ipv6_chk_addr = ipv6_chk_addr,
112281+};
112282+
112283+static struct pingv6_ops dummy_pingv6_ops = {
112284+ .ipv6_recv_error = dummy_ipv6_recv_error,
112285+ .ip6_datagram_recv_common_ctl = dummy_ip6_datagram_recv_ctl,
112286+ .ip6_datagram_recv_specific_ctl = dummy_ip6_datagram_recv_ctl,
112287+ .icmpv6_err_convert = dummy_icmpv6_err_convert,
112288+ .ipv6_icmp_error = dummy_ipv6_icmp_error,
112289+ .ipv6_chk_addr = dummy_ipv6_chk_addr,
112290+};
112291+
112292 int __init pingv6_init(void)
112293 {
112294 #ifdef CONFIG_PROC_FS
112295@@ -248,13 +266,7 @@ int __init pingv6_init(void)
112296 if (ret)
112297 return ret;
112298 #endif
112299- pingv6_ops.ipv6_recv_error = ipv6_recv_error;
112300- pingv6_ops.ip6_datagram_recv_common_ctl = ip6_datagram_recv_common_ctl;
112301- pingv6_ops.ip6_datagram_recv_specific_ctl =
112302- ip6_datagram_recv_specific_ctl;
112303- pingv6_ops.icmpv6_err_convert = icmpv6_err_convert;
112304- pingv6_ops.ipv6_icmp_error = ipv6_icmp_error;
112305- pingv6_ops.ipv6_chk_addr = ipv6_chk_addr;
112306+ pingv6_ops = &real_pingv6_ops;
112307 return inet6_register_protosw(&pingv6_protosw);
112308 }
112309
112310@@ -263,14 +275,9 @@ int __init pingv6_init(void)
112311 */
112312 void pingv6_exit(void)
112313 {
112314- pingv6_ops.ipv6_recv_error = dummy_ipv6_recv_error;
112315- pingv6_ops.ip6_datagram_recv_common_ctl = dummy_ip6_datagram_recv_ctl;
112316- pingv6_ops.ip6_datagram_recv_specific_ctl = dummy_ip6_datagram_recv_ctl;
112317- pingv6_ops.icmpv6_err_convert = dummy_icmpv6_err_convert;
112318- pingv6_ops.ipv6_icmp_error = dummy_ipv6_icmp_error;
112319- pingv6_ops.ipv6_chk_addr = dummy_ipv6_chk_addr;
112320 #ifdef CONFIG_PROC_FS
112321 unregister_pernet_subsys(&ping_v6_net_ops);
112322 #endif
112323+ pingv6_ops = &dummy_pingv6_ops;
112324 inet6_unregister_protosw(&pingv6_protosw);
112325 }
112326diff --git a/net/ipv6/proc.c b/net/ipv6/proc.c
112327index 679253d0..70b653c 100644
112328--- a/net/ipv6/proc.c
112329+++ b/net/ipv6/proc.c
112330@@ -310,7 +310,7 @@ static int __net_init ipv6_proc_init_net(struct net *net)
112331 if (!proc_create("snmp6", S_IRUGO, net->proc_net, &snmp6_seq_fops))
112332 goto proc_snmp6_fail;
112333
112334- net->mib.proc_net_devsnmp6 = proc_mkdir("dev_snmp6", net->proc_net);
112335+ net->mib.proc_net_devsnmp6 = proc_mkdir_restrict("dev_snmp6", net->proc_net);
112336 if (!net->mib.proc_net_devsnmp6)
112337 goto proc_dev_snmp6_fail;
112338 return 0;
112339diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
112340index dae7f1a..783b20d 100644
112341--- a/net/ipv6/raw.c
112342+++ b/net/ipv6/raw.c
112343@@ -388,7 +388,7 @@ static inline int rawv6_rcv_skb(struct sock *sk, struct sk_buff *skb)
112344 {
112345 if ((raw6_sk(sk)->checksum || rcu_access_pointer(sk->sk_filter)) &&
112346 skb_checksum_complete(skb)) {
112347- atomic_inc(&sk->sk_drops);
112348+ atomic_inc_unchecked(&sk->sk_drops);
112349 kfree_skb(skb);
112350 return NET_RX_DROP;
112351 }
112352@@ -416,7 +416,7 @@ int rawv6_rcv(struct sock *sk, struct sk_buff *skb)
112353 struct raw6_sock *rp = raw6_sk(sk);
112354
112355 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb)) {
112356- atomic_inc(&sk->sk_drops);
112357+ atomic_inc_unchecked(&sk->sk_drops);
112358 kfree_skb(skb);
112359 return NET_RX_DROP;
112360 }
112361@@ -440,7 +440,7 @@ int rawv6_rcv(struct sock *sk, struct sk_buff *skb)
112362
112363 if (inet->hdrincl) {
112364 if (skb_checksum_complete(skb)) {
112365- atomic_inc(&sk->sk_drops);
112366+ atomic_inc_unchecked(&sk->sk_drops);
112367 kfree_skb(skb);
112368 return NET_RX_DROP;
112369 }
112370@@ -609,7 +609,7 @@ out:
112371 return err;
112372 }
112373
112374-static int rawv6_send_hdrinc(struct sock *sk, struct msghdr *msg, int length,
112375+static int rawv6_send_hdrinc(struct sock *sk, struct msghdr *msg, unsigned int length,
112376 struct flowi6 *fl6, struct dst_entry **dstp,
112377 unsigned int flags)
112378 {
112379@@ -915,12 +915,15 @@ do_confirm:
112380 static int rawv6_seticmpfilter(struct sock *sk, int level, int optname,
112381 char __user *optval, int optlen)
112382 {
112383+ struct icmp6_filter filter;
112384+
112385 switch (optname) {
112386 case ICMPV6_FILTER:
112387 if (optlen > sizeof(struct icmp6_filter))
112388 optlen = sizeof(struct icmp6_filter);
112389- if (copy_from_user(&raw6_sk(sk)->filter, optval, optlen))
112390+ if (copy_from_user(&filter, optval, optlen))
112391 return -EFAULT;
112392+ raw6_sk(sk)->filter = filter;
112393 return 0;
112394 default:
112395 return -ENOPROTOOPT;
112396@@ -933,6 +936,7 @@ static int rawv6_geticmpfilter(struct sock *sk, int level, int optname,
112397 char __user *optval, int __user *optlen)
112398 {
112399 int len;
112400+ struct icmp6_filter filter;
112401
112402 switch (optname) {
112403 case ICMPV6_FILTER:
112404@@ -944,7 +948,8 @@ static int rawv6_geticmpfilter(struct sock *sk, int level, int optname,
112405 len = sizeof(struct icmp6_filter);
112406 if (put_user(len, optlen))
112407 return -EFAULT;
112408- if (copy_to_user(optval, &raw6_sk(sk)->filter, len))
112409+ filter = raw6_sk(sk)->filter;
112410+ if (len > sizeof filter || copy_to_user(optval, &filter, len))
112411 return -EFAULT;
112412 return 0;
112413 default:
112414diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c
112415index d7d70e6..bd5e9fc 100644
112416--- a/net/ipv6/reassembly.c
112417+++ b/net/ipv6/reassembly.c
112418@@ -626,12 +626,11 @@ static struct ctl_table ip6_frags_ctl_table[] = {
112419
112420 static int __net_init ip6_frags_ns_sysctl_register(struct net *net)
112421 {
112422- struct ctl_table *table;
112423+ ctl_table_no_const *table = NULL;
112424 struct ctl_table_header *hdr;
112425
112426- table = ip6_frags_ns_ctl_table;
112427 if (!net_eq(net, &init_net)) {
112428- table = kmemdup(table, sizeof(ip6_frags_ns_ctl_table), GFP_KERNEL);
112429+ table = kmemdup(ip6_frags_ns_ctl_table, sizeof(ip6_frags_ns_ctl_table), GFP_KERNEL);
112430 if (table == NULL)
112431 goto err_alloc;
112432
112433@@ -645,9 +644,10 @@ static int __net_init ip6_frags_ns_sysctl_register(struct net *net)
112434 /* Don't export sysctls to unprivileged users */
112435 if (net->user_ns != &init_user_ns)
112436 table[0].procname = NULL;
112437- }
112438+ hdr = register_net_sysctl(net, "net/ipv6", table);
112439+ } else
112440+ hdr = register_net_sysctl(net, "net/ipv6", ip6_frags_ns_ctl_table);
112441
112442- hdr = register_net_sysctl(net, "net/ipv6", table);
112443 if (hdr == NULL)
112444 goto err_reg;
112445
112446@@ -655,8 +655,7 @@ static int __net_init ip6_frags_ns_sysctl_register(struct net *net)
112447 return 0;
112448
112449 err_reg:
112450- if (!net_eq(net, &init_net))
112451- kfree(table);
112452+ kfree(table);
112453 err_alloc:
112454 return -ENOMEM;
112455 }
112456diff --git a/net/ipv6/route.c b/net/ipv6/route.c
112457index 4688bd4..584453d 100644
112458--- a/net/ipv6/route.c
112459+++ b/net/ipv6/route.c
112460@@ -3029,7 +3029,7 @@ struct ctl_table ipv6_route_table_template[] = {
112461
112462 struct ctl_table * __net_init ipv6_route_sysctl_init(struct net *net)
112463 {
112464- struct ctl_table *table;
112465+ ctl_table_no_const *table;
112466
112467 table = kmemdup(ipv6_route_table_template,
112468 sizeof(ipv6_route_table_template),
112469diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
112470index e4cbd57..02b1aaa 100644
112471--- a/net/ipv6/sit.c
112472+++ b/net/ipv6/sit.c
112473@@ -74,7 +74,7 @@ static void ipip6_tunnel_setup(struct net_device *dev);
112474 static void ipip6_dev_free(struct net_device *dev);
112475 static bool check_6rd(struct ip_tunnel *tunnel, const struct in6_addr *v6dst,
112476 __be32 *v4dst);
112477-static struct rtnl_link_ops sit_link_ops __read_mostly;
112478+static struct rtnl_link_ops sit_link_ops;
112479
112480 static int sit_net_id __read_mostly;
112481 struct sit_net {
112482@@ -1751,7 +1751,7 @@ static void ipip6_dellink(struct net_device *dev, struct list_head *head)
112483 unregister_netdevice_queue(dev, head);
112484 }
112485
112486-static struct rtnl_link_ops sit_link_ops __read_mostly = {
112487+static struct rtnl_link_ops sit_link_ops = {
112488 .kind = "sit",
112489 .maxtype = IFLA_IPTUN_MAX,
112490 .policy = ipip6_policy,
112491diff --git a/net/ipv6/sysctl_net_ipv6.c b/net/ipv6/sysctl_net_ipv6.c
112492index c5c10fa..2577d51 100644
112493--- a/net/ipv6/sysctl_net_ipv6.c
112494+++ b/net/ipv6/sysctl_net_ipv6.c
112495@@ -78,7 +78,7 @@ static struct ctl_table ipv6_rotable[] = {
112496
112497 static int __net_init ipv6_sysctl_net_init(struct net *net)
112498 {
112499- struct ctl_table *ipv6_table;
112500+ ctl_table_no_const *ipv6_table;
112501 struct ctl_table *ipv6_route_table;
112502 struct ctl_table *ipv6_icmp_table;
112503 int err;
112504diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
112505index 1f5e622..8387d90 100644
112506--- a/net/ipv6/tcp_ipv6.c
112507+++ b/net/ipv6/tcp_ipv6.c
112508@@ -104,6 +104,10 @@ static void inet6_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
112509 }
112510 }
112511
112512+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
112513+extern int grsec_enable_blackhole;
112514+#endif
112515+
112516 static void tcp_v6_hash(struct sock *sk)
112517 {
112518 if (sk->sk_state != TCP_CLOSE) {
112519@@ -1345,6 +1349,9 @@ static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
112520 return 0;
112521
112522 reset:
112523+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
112524+ if (!grsec_enable_blackhole)
112525+#endif
112526 tcp_v6_send_reset(sk, skb);
112527 discard:
112528 if (opt_skb)
112529@@ -1454,12 +1461,20 @@ static int tcp_v6_rcv(struct sk_buff *skb)
112530
112531 sk = __inet6_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest,
112532 inet6_iif(skb));
112533- if (!sk)
112534+ if (!sk) {
112535+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
112536+ ret = 1;
112537+#endif
112538 goto no_tcp_socket;
112539+ }
112540
112541 process:
112542- if (sk->sk_state == TCP_TIME_WAIT)
112543+ if (sk->sk_state == TCP_TIME_WAIT) {
112544+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
112545+ ret = 2;
112546+#endif
112547 goto do_time_wait;
112548+ }
112549
112550 if (hdr->hop_limit < inet6_sk(sk)->min_hopcount) {
112551 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
112552@@ -1510,6 +1525,10 @@ csum_error:
112553 bad_packet:
112554 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
112555 } else {
112556+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
112557+ if (!grsec_enable_blackhole || (ret == 1 &&
112558+ (skb->dev->flags & IFF_LOOPBACK)))
112559+#endif
112560 tcp_v6_send_reset(NULL, skb);
112561 }
112562
112563diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
112564index d048d46..bf141c3 100644
112565--- a/net/ipv6/udp.c
112566+++ b/net/ipv6/udp.c
112567@@ -76,6 +76,10 @@ static unsigned int udp6_ehashfn(struct net *net,
112568 udp_ipv6_hash_secret + net_hash_mix(net));
112569 }
112570
112571+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
112572+extern int grsec_enable_blackhole;
112573+#endif
112574+
112575 int ipv6_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2)
112576 {
112577 const struct in6_addr *sk2_rcv_saddr6 = inet6_rcv_saddr(sk2);
112578@@ -448,7 +452,7 @@ try_again:
112579 if (unlikely(err)) {
112580 trace_kfree_skb(skb, udpv6_recvmsg);
112581 if (!peeked) {
112582- atomic_inc(&sk->sk_drops);
112583+ atomic_inc_unchecked(&sk->sk_drops);
112584 if (is_udp4)
112585 UDP_INC_STATS_USER(sock_net(sk),
112586 UDP_MIB_INERRORS,
112587@@ -714,7 +718,7 @@ csum_error:
112588 UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_CSUMERRORS, is_udplite);
112589 drop:
112590 UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
112591- atomic_inc(&sk->sk_drops);
112592+ atomic_inc_unchecked(&sk->sk_drops);
112593 kfree_skb(skb);
112594 return -1;
112595 }
112596@@ -753,7 +757,7 @@ static void flush_stack(struct sock **stack, unsigned int count,
112597 if (likely(skb1 == NULL))
112598 skb1 = (i == final) ? skb : skb_clone(skb, GFP_ATOMIC);
112599 if (!skb1) {
112600- atomic_inc(&sk->sk_drops);
112601+ atomic_inc_unchecked(&sk->sk_drops);
112602 UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS,
112603 IS_UDPLITE(sk));
112604 UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
112605@@ -937,6 +941,9 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
112606 goto csum_error;
112607
112608 UDP6_INC_STATS_BH(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
112609+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
112610+ if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
112611+#endif
112612 icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0);
112613
112614 kfree_skb(skb);
112615diff --git a/net/ipv6/xfrm6_policy.c b/net/ipv6/xfrm6_policy.c
112616index 8d2d01b4..313511e 100644
112617--- a/net/ipv6/xfrm6_policy.c
112618+++ b/net/ipv6/xfrm6_policy.c
112619@@ -224,11 +224,11 @@ _decode_session6(struct sk_buff *skb, struct flowi *fl, int reverse)
112620 }
112621 }
112622
112623-static inline int xfrm6_garbage_collect(struct dst_ops *ops)
112624+static int xfrm6_garbage_collect(struct dst_ops *ops)
112625 {
112626 struct net *net = container_of(ops, struct net, xfrm.xfrm6_dst_ops);
112627
112628- xfrm6_policy_afinfo.garbage_collect(net);
112629+ xfrm_garbage_collect_deferred(net);
112630 return dst_entries_get_fast(ops) > ops->gc_thresh * 2;
112631 }
112632
112633@@ -341,19 +341,19 @@ static struct ctl_table xfrm6_policy_table[] = {
112634
112635 static int __net_init xfrm6_net_init(struct net *net)
112636 {
112637- struct ctl_table *table;
112638+ ctl_table_no_const *table = NULL;
112639 struct ctl_table_header *hdr;
112640
112641- table = xfrm6_policy_table;
112642 if (!net_eq(net, &init_net)) {
112643- table = kmemdup(table, sizeof(xfrm6_policy_table), GFP_KERNEL);
112644+ table = kmemdup(xfrm6_policy_table, sizeof(xfrm6_policy_table), GFP_KERNEL);
112645 if (!table)
112646 goto err_alloc;
112647
112648 table[0].data = &net->xfrm.xfrm6_dst_ops.gc_thresh;
112649- }
112650+ hdr = register_net_sysctl(net, "net/ipv6", table);
112651+ } else
112652+ hdr = register_net_sysctl(net, "net/ipv6", xfrm6_policy_table);
112653
112654- hdr = register_net_sysctl(net, "net/ipv6", table);
112655 if (!hdr)
112656 goto err_reg;
112657
112658@@ -361,8 +361,7 @@ static int __net_init xfrm6_net_init(struct net *net)
112659 return 0;
112660
112661 err_reg:
112662- if (!net_eq(net, &init_net))
112663- kfree(table);
112664+ kfree(table);
112665 err_alloc:
112666 return -ENOMEM;
112667 }
112668diff --git a/net/ipx/ipx_proc.c b/net/ipx/ipx_proc.c
112669index c1d247e..9e5949d 100644
112670--- a/net/ipx/ipx_proc.c
112671+++ b/net/ipx/ipx_proc.c
112672@@ -289,7 +289,7 @@ int __init ipx_proc_init(void)
112673 struct proc_dir_entry *p;
112674 int rc = -ENOMEM;
112675
112676- ipx_proc_dir = proc_mkdir("ipx", init_net.proc_net);
112677+ ipx_proc_dir = proc_mkdir_restrict("ipx", init_net.proc_net);
112678
112679 if (!ipx_proc_dir)
112680 goto out;
112681diff --git a/net/irda/ircomm/ircomm_tty.c b/net/irda/ircomm/ircomm_tty.c
112682index 683346d..cb0e12d 100644
112683--- a/net/irda/ircomm/ircomm_tty.c
112684+++ b/net/irda/ircomm/ircomm_tty.c
112685@@ -310,10 +310,10 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
112686 add_wait_queue(&port->open_wait, &wait);
112687
112688 pr_debug("%s(%d):block_til_ready before block on %s open_count=%d\n",
112689- __FILE__, __LINE__, tty->driver->name, port->count);
112690+ __FILE__, __LINE__, tty->driver->name, atomic_read(&port->count));
112691
112692 spin_lock_irqsave(&port->lock, flags);
112693- port->count--;
112694+ atomic_dec(&port->count);
112695 port->blocked_open++;
112696 spin_unlock_irqrestore(&port->lock, flags);
112697
112698@@ -348,7 +348,7 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
112699 }
112700
112701 pr_debug("%s(%d):block_til_ready blocking on %s open_count=%d\n",
112702- __FILE__, __LINE__, tty->driver->name, port->count);
112703+ __FILE__, __LINE__, tty->driver->name, atomic_read(&port->count));
112704
112705 schedule();
112706 }
112707@@ -358,12 +358,12 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
112708
112709 spin_lock_irqsave(&port->lock, flags);
112710 if (!tty_hung_up_p(filp))
112711- port->count++;
112712+ atomic_inc(&port->count);
112713 port->blocked_open--;
112714 spin_unlock_irqrestore(&port->lock, flags);
112715
112716 pr_debug("%s(%d):block_til_ready after blocking on %s open_count=%d\n",
112717- __FILE__, __LINE__, tty->driver->name, port->count);
112718+ __FILE__, __LINE__, tty->driver->name, atomic_read(&port->count));
112719
112720 if (!retval)
112721 port->flags |= ASYNC_NORMAL_ACTIVE;
112722@@ -433,12 +433,12 @@ static int ircomm_tty_open(struct tty_struct *tty, struct file *filp)
112723
112724 /* ++ is not atomic, so this should be protected - Jean II */
112725 spin_lock_irqsave(&self->port.lock, flags);
112726- self->port.count++;
112727+ atomic_inc(&self->port.count);
112728 spin_unlock_irqrestore(&self->port.lock, flags);
112729 tty_port_tty_set(&self->port, tty);
112730
112731 pr_debug("%s(), %s%d, count = %d\n", __func__ , tty->driver->name,
112732- self->line, self->port.count);
112733+ self->line, atomic_read(&self->port.count));
112734
112735 /* Not really used by us, but lets do it anyway */
112736 self->port.low_latency = (self->port.flags & ASYNC_LOW_LATENCY) ? 1 : 0;
112737@@ -961,7 +961,7 @@ static void ircomm_tty_hangup(struct tty_struct *tty)
112738 tty_kref_put(port->tty);
112739 }
112740 port->tty = NULL;
112741- port->count = 0;
112742+ atomic_set(&port->count, 0);
112743 spin_unlock_irqrestore(&port->lock, flags);
112744
112745 wake_up_interruptible(&port->open_wait);
112746@@ -1308,7 +1308,7 @@ static void ircomm_tty_line_info(struct ircomm_tty_cb *self, struct seq_file *m)
112747 seq_putc(m, '\n');
112748
112749 seq_printf(m, "Role: %s\n", self->client ? "client" : "server");
112750- seq_printf(m, "Open count: %d\n", self->port.count);
112751+ seq_printf(m, "Open count: %d\n", atomic_read(&self->port.count));
112752 seq_printf(m, "Max data size: %d\n", self->max_data_size);
112753 seq_printf(m, "Max header size: %d\n", self->max_header_size);
112754
112755diff --git a/net/irda/irproc.c b/net/irda/irproc.c
112756index b9ac598..f88cc56 100644
112757--- a/net/irda/irproc.c
112758+++ b/net/irda/irproc.c
112759@@ -66,7 +66,7 @@ void __init irda_proc_register(void)
112760 {
112761 int i;
112762
112763- proc_irda = proc_mkdir("irda", init_net.proc_net);
112764+ proc_irda = proc_mkdir_restrict("irda", init_net.proc_net);
112765 if (proc_irda == NULL)
112766 return;
112767
112768diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
112769index 53d9311..cbaf99f 100644
112770--- a/net/iucv/af_iucv.c
112771+++ b/net/iucv/af_iucv.c
112772@@ -686,10 +686,10 @@ static void __iucv_auto_name(struct iucv_sock *iucv)
112773 {
112774 char name[12];
112775
112776- sprintf(name, "%08x", atomic_inc_return(&iucv_sk_list.autobind_name));
112777+ sprintf(name, "%08x", atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
112778 while (__iucv_get_sock_by_name(name)) {
112779 sprintf(name, "%08x",
112780- atomic_inc_return(&iucv_sk_list.autobind_name));
112781+ atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
112782 }
112783 memcpy(iucv->src_name, name, 8);
112784 }
112785diff --git a/net/iucv/iucv.c b/net/iucv/iucv.c
112786index 2a6a1fd..6c112b0 100644
112787--- a/net/iucv/iucv.c
112788+++ b/net/iucv/iucv.c
112789@@ -702,7 +702,7 @@ static int iucv_cpu_notify(struct notifier_block *self,
112790 return NOTIFY_OK;
112791 }
112792
112793-static struct notifier_block __refdata iucv_cpu_notifier = {
112794+static struct notifier_block iucv_cpu_notifier = {
112795 .notifier_call = iucv_cpu_notify,
112796 };
112797
112798diff --git a/net/key/af_key.c b/net/key/af_key.c
112799index f8ac939..1e189bf 100644
112800--- a/net/key/af_key.c
112801+++ b/net/key/af_key.c
112802@@ -3049,10 +3049,10 @@ static int pfkey_send_policy_notify(struct xfrm_policy *xp, int dir, const struc
112803 static u32 get_acqseq(void)
112804 {
112805 u32 res;
112806- static atomic_t acqseq;
112807+ static atomic_unchecked_t acqseq;
112808
112809 do {
112810- res = atomic_inc_return(&acqseq);
112811+ res = atomic_inc_return_unchecked(&acqseq);
112812 } while (!res);
112813 return res;
112814 }
112815diff --git a/net/l2tp/l2tp_eth.c b/net/l2tp/l2tp_eth.c
112816index 781b3a2..73a7434 100644
112817--- a/net/l2tp/l2tp_eth.c
112818+++ b/net/l2tp/l2tp_eth.c
112819@@ -42,12 +42,12 @@ struct l2tp_eth {
112820 struct sock *tunnel_sock;
112821 struct l2tp_session *session;
112822 struct list_head list;
112823- atomic_long_t tx_bytes;
112824- atomic_long_t tx_packets;
112825- atomic_long_t tx_dropped;
112826- atomic_long_t rx_bytes;
112827- atomic_long_t rx_packets;
112828- atomic_long_t rx_errors;
112829+ atomic_long_unchecked_t tx_bytes;
112830+ atomic_long_unchecked_t tx_packets;
112831+ atomic_long_unchecked_t tx_dropped;
112832+ atomic_long_unchecked_t rx_bytes;
112833+ atomic_long_unchecked_t rx_packets;
112834+ atomic_long_unchecked_t rx_errors;
112835 };
112836
112837 /* via l2tp_session_priv() */
112838@@ -98,10 +98,10 @@ static int l2tp_eth_dev_xmit(struct sk_buff *skb, struct net_device *dev)
112839 int ret = l2tp_xmit_skb(session, skb, session->hdr_len);
112840
112841 if (likely(ret == NET_XMIT_SUCCESS)) {
112842- atomic_long_add(len, &priv->tx_bytes);
112843- atomic_long_inc(&priv->tx_packets);
112844+ atomic_long_add_unchecked(len, &priv->tx_bytes);
112845+ atomic_long_inc_unchecked(&priv->tx_packets);
112846 } else {
112847- atomic_long_inc(&priv->tx_dropped);
112848+ atomic_long_inc_unchecked(&priv->tx_dropped);
112849 }
112850 return NETDEV_TX_OK;
112851 }
112852@@ -111,12 +111,12 @@ static struct rtnl_link_stats64 *l2tp_eth_get_stats64(struct net_device *dev,
112853 {
112854 struct l2tp_eth *priv = netdev_priv(dev);
112855
112856- stats->tx_bytes = atomic_long_read(&priv->tx_bytes);
112857- stats->tx_packets = atomic_long_read(&priv->tx_packets);
112858- stats->tx_dropped = atomic_long_read(&priv->tx_dropped);
112859- stats->rx_bytes = atomic_long_read(&priv->rx_bytes);
112860- stats->rx_packets = atomic_long_read(&priv->rx_packets);
112861- stats->rx_errors = atomic_long_read(&priv->rx_errors);
112862+ stats->tx_bytes = atomic_long_read_unchecked(&priv->tx_bytes);
112863+ stats->tx_packets = atomic_long_read_unchecked(&priv->tx_packets);
112864+ stats->tx_dropped = atomic_long_read_unchecked(&priv->tx_dropped);
112865+ stats->rx_bytes = atomic_long_read_unchecked(&priv->rx_bytes);
112866+ stats->rx_packets = atomic_long_read_unchecked(&priv->rx_packets);
112867+ stats->rx_errors = atomic_long_read_unchecked(&priv->rx_errors);
112868 return stats;
112869 }
112870
112871@@ -167,15 +167,15 @@ static void l2tp_eth_dev_recv(struct l2tp_session *session, struct sk_buff *skb,
112872 nf_reset(skb);
112873
112874 if (dev_forward_skb(dev, skb) == NET_RX_SUCCESS) {
112875- atomic_long_inc(&priv->rx_packets);
112876- atomic_long_add(data_len, &priv->rx_bytes);
112877+ atomic_long_inc_unchecked(&priv->rx_packets);
112878+ atomic_long_add_unchecked(data_len, &priv->rx_bytes);
112879 } else {
112880- atomic_long_inc(&priv->rx_errors);
112881+ atomic_long_inc_unchecked(&priv->rx_errors);
112882 }
112883 return;
112884
112885 error:
112886- atomic_long_inc(&priv->rx_errors);
112887+ atomic_long_inc_unchecked(&priv->rx_errors);
112888 kfree_skb(skb);
112889 }
112890
112891diff --git a/net/l2tp/l2tp_ip.c b/net/l2tp/l2tp_ip.c
112892index 05dfc8aa..df6cfd7 100644
112893--- a/net/l2tp/l2tp_ip.c
112894+++ b/net/l2tp/l2tp_ip.c
112895@@ -608,7 +608,7 @@ static struct inet_protosw l2tp_ip_protosw = {
112896 .ops = &l2tp_ip_ops,
112897 };
112898
112899-static struct net_protocol l2tp_ip_protocol __read_mostly = {
112900+static const struct net_protocol l2tp_ip_protocol = {
112901 .handler = l2tp_ip_recv,
112902 .netns_ok = 1,
112903 };
112904diff --git a/net/l2tp/l2tp_ip6.c b/net/l2tp/l2tp_ip6.c
112905index 8611f1b..bc60a2d 100644
112906--- a/net/l2tp/l2tp_ip6.c
112907+++ b/net/l2tp/l2tp_ip6.c
112908@@ -757,7 +757,7 @@ static struct inet_protosw l2tp_ip6_protosw = {
112909 .ops = &l2tp_ip6_ops,
112910 };
112911
112912-static struct inet6_protocol l2tp_ip6_protocol __read_mostly = {
112913+static const struct inet6_protocol l2tp_ip6_protocol = {
112914 .handler = l2tp_ip6_recv,
112915 };
112916
112917diff --git a/net/llc/llc_proc.c b/net/llc/llc_proc.c
112918index 1a3c7e0..80f8b0c 100644
112919--- a/net/llc/llc_proc.c
112920+++ b/net/llc/llc_proc.c
112921@@ -247,7 +247,7 @@ int __init llc_proc_init(void)
112922 int rc = -ENOMEM;
112923 struct proc_dir_entry *p;
112924
112925- llc_proc_dir = proc_mkdir("llc", init_net.proc_net);
112926+ llc_proc_dir = proc_mkdir_restrict("llc", init_net.proc_net);
112927 if (!llc_proc_dir)
112928 goto out;
112929
112930diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
112931index dd4ff36..3462997 100644
112932--- a/net/mac80211/cfg.c
112933+++ b/net/mac80211/cfg.c
112934@@ -581,7 +581,7 @@ static int ieee80211_set_monitor_channel(struct wiphy *wiphy,
112935 ret = ieee80211_vif_use_channel(sdata, chandef,
112936 IEEE80211_CHANCTX_EXCLUSIVE);
112937 }
112938- } else if (local->open_count == local->monitors) {
112939+ } else if (local_read(&local->open_count) == local->monitors) {
112940 local->_oper_chandef = *chandef;
112941 ieee80211_hw_config(local, 0);
112942 }
112943@@ -3468,7 +3468,7 @@ static void ieee80211_mgmt_frame_register(struct wiphy *wiphy,
112944 else
112945 local->probe_req_reg--;
112946
112947- if (!local->open_count)
112948+ if (!local_read(&local->open_count))
112949 break;
112950
112951 ieee80211_queue_work(&local->hw, &local->reconfig_filter);
112952@@ -3603,8 +3603,8 @@ static int ieee80211_cfg_get_channel(struct wiphy *wiphy,
112953 if (chanctx_conf) {
112954 *chandef = sdata->vif.bss_conf.chandef;
112955 ret = 0;
112956- } else if (local->open_count > 0 &&
112957- local->open_count == local->monitors &&
112958+ } else if (local_read(&local->open_count) > 0 &&
112959+ local_read(&local->open_count) == local->monitors &&
112960 sdata->vif.type == NL80211_IFTYPE_MONITOR) {
112961 if (local->use_chanctx)
112962 *chandef = local->monitor_chandef;
112963diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
112964index 8d53d65..a4ac794 100644
112965--- a/net/mac80211/ieee80211_i.h
112966+++ b/net/mac80211/ieee80211_i.h
112967@@ -29,6 +29,7 @@
112968 #include <net/ieee80211_radiotap.h>
112969 #include <net/cfg80211.h>
112970 #include <net/mac80211.h>
112971+#include <asm/local.h>
112972 #include "key.h"
112973 #include "sta_info.h"
112974 #include "debug.h"
112975@@ -1126,7 +1127,7 @@ struct ieee80211_local {
112976 /* also used to protect ampdu_ac_queue and amdpu_ac_stop_refcnt */
112977 spinlock_t queue_stop_reason_lock;
112978
112979- int open_count;
112980+ local_t open_count;
112981 int monitors, cooked_mntrs;
112982 /* number of interfaces with corresponding FIF_ flags */
112983 int fif_fcsfail, fif_plcpfail, fif_control, fif_other_bss, fif_pspoll,
112984diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
112985index 81a2751..c06a026 100644
112986--- a/net/mac80211/iface.c
112987+++ b/net/mac80211/iface.c
112988@@ -544,7 +544,7 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up)
112989 break;
112990 }
112991
112992- if (local->open_count == 0) {
112993+ if (local_read(&local->open_count) == 0) {
112994 res = drv_start(local);
112995 if (res)
112996 goto err_del_bss;
112997@@ -591,7 +591,7 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up)
112998 res = drv_add_interface(local, sdata);
112999 if (res)
113000 goto err_stop;
113001- } else if (local->monitors == 0 && local->open_count == 0) {
113002+ } else if (local->monitors == 0 && local_read(&local->open_count) == 0) {
113003 res = ieee80211_add_virtual_monitor(local);
113004 if (res)
113005 goto err_stop;
113006@@ -701,7 +701,7 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up)
113007 atomic_inc(&local->iff_promiscs);
113008
113009 if (coming_up)
113010- local->open_count++;
113011+ local_inc(&local->open_count);
113012
113013 if (hw_reconf_flags)
113014 ieee80211_hw_config(local, hw_reconf_flags);
113015@@ -739,7 +739,7 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up)
113016 err_del_interface:
113017 drv_remove_interface(local, sdata);
113018 err_stop:
113019- if (!local->open_count)
113020+ if (!local_read(&local->open_count))
113021 drv_stop(local);
113022 err_del_bss:
113023 sdata->bss = NULL;
113024@@ -907,7 +907,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
113025 }
113026
113027 if (going_down)
113028- local->open_count--;
113029+ local_dec(&local->open_count);
113030
113031 switch (sdata->vif.type) {
113032 case NL80211_IFTYPE_AP_VLAN:
113033@@ -969,7 +969,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
113034 }
113035 spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);
113036
113037- if (local->open_count == 0)
113038+ if (local_read(&local->open_count) == 0)
113039 ieee80211_clear_tx_pending(local);
113040
113041 /*
113042@@ -1012,7 +1012,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
113043 if (cancel_scan)
113044 flush_delayed_work(&local->scan_work);
113045
113046- if (local->open_count == 0) {
113047+ if (local_read(&local->open_count) == 0) {
113048 ieee80211_stop_device(local);
113049
113050 /* no reconfiguring after stop! */
113051@@ -1023,7 +1023,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
113052 ieee80211_configure_filter(local);
113053 ieee80211_hw_config(local, hw_reconf_flags);
113054
113055- if (local->monitors == local->open_count)
113056+ if (local->monitors == local_read(&local->open_count))
113057 ieee80211_add_virtual_monitor(local);
113058 }
113059
113060diff --git a/net/mac80211/main.c b/net/mac80211/main.c
113061index 5e09d35..e2fdbe2 100644
113062--- a/net/mac80211/main.c
113063+++ b/net/mac80211/main.c
113064@@ -175,7 +175,7 @@ int ieee80211_hw_config(struct ieee80211_local *local, u32 changed)
113065 changed &= ~(IEEE80211_CONF_CHANGE_CHANNEL |
113066 IEEE80211_CONF_CHANGE_POWER);
113067
113068- if (changed && local->open_count) {
113069+ if (changed && local_read(&local->open_count)) {
113070 ret = drv_config(local, changed);
113071 /*
113072 * Goal:
113073diff --git a/net/mac80211/pm.c b/net/mac80211/pm.c
113074index ca405b6..6cc8bee 100644
113075--- a/net/mac80211/pm.c
113076+++ b/net/mac80211/pm.c
113077@@ -12,7 +12,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
113078 struct ieee80211_sub_if_data *sdata;
113079 struct sta_info *sta;
113080
113081- if (!local->open_count)
113082+ if (!local_read(&local->open_count))
113083 goto suspend;
113084
113085 ieee80211_scan_cancel(local);
113086@@ -59,7 +59,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
113087 cancel_work_sync(&local->dynamic_ps_enable_work);
113088 del_timer_sync(&local->dynamic_ps_timer);
113089
113090- local->wowlan = wowlan && local->open_count;
113091+ local->wowlan = wowlan && local_read(&local->open_count);
113092 if (local->wowlan) {
113093 int err = drv_suspend(local, wowlan);
113094 if (err < 0) {
113095@@ -126,7 +126,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
113096 WARN_ON(!list_empty(&local->chanctx_list));
113097
113098 /* stop hardware - this must stop RX */
113099- if (local->open_count)
113100+ if (local_read(&local->open_count))
113101 ieee80211_stop_device(local);
113102
113103 suspend:
113104diff --git a/net/mac80211/rate.c b/net/mac80211/rate.c
113105index d53355b..21f583a 100644
113106--- a/net/mac80211/rate.c
113107+++ b/net/mac80211/rate.c
113108@@ -724,7 +724,7 @@ int ieee80211_init_rate_ctrl_alg(struct ieee80211_local *local,
113109
113110 ASSERT_RTNL();
113111
113112- if (local->open_count)
113113+ if (local_read(&local->open_count))
113114 return -EBUSY;
113115
113116 if (local->hw.flags & IEEE80211_HW_HAS_RATE_CONTROL) {
113117diff --git a/net/mac80211/util.c b/net/mac80211/util.c
113118index 747bdcf..eb2b981 100644
113119--- a/net/mac80211/util.c
113120+++ b/net/mac80211/util.c
113121@@ -1741,7 +1741,7 @@ int ieee80211_reconfig(struct ieee80211_local *local)
113122 bool sched_scan_stopped = false;
113123
113124 /* nothing to do if HW shouldn't run */
113125- if (!local->open_count)
113126+ if (!local_read(&local->open_count))
113127 goto wake_up;
113128
113129 #ifdef CONFIG_PM
113130@@ -1993,7 +1993,7 @@ int ieee80211_reconfig(struct ieee80211_local *local)
113131 local->in_reconfig = false;
113132 barrier();
113133
113134- if (local->monitors == local->open_count && local->monitors > 0)
113135+ if (local->monitors == local_read(&local->open_count) && local->monitors > 0)
113136 ieee80211_add_virtual_monitor(local);
113137
113138 /*
113139@@ -2048,7 +2048,7 @@ int ieee80211_reconfig(struct ieee80211_local *local)
113140 * If this is for hw restart things are still running.
113141 * We may want to change that later, however.
113142 */
113143- if (local->open_count && (!local->suspended || reconfig_due_to_wowlan))
113144+ if (local_read(&local->open_count) && (!local->suspended || reconfig_due_to_wowlan))
113145 drv_reconfig_complete(local, IEEE80211_RECONFIG_TYPE_RESTART);
113146
113147 if (!local->suspended)
113148@@ -2072,7 +2072,7 @@ int ieee80211_reconfig(struct ieee80211_local *local)
113149 flush_delayed_work(&local->scan_work);
113150 }
113151
113152- if (local->open_count && !reconfig_due_to_wowlan)
113153+ if (local_read(&local->open_count) && !reconfig_due_to_wowlan)
113154 drv_reconfig_complete(local, IEEE80211_RECONFIG_TYPE_SUSPEND);
113155
113156 list_for_each_entry(sdata, &local->interfaces, list) {
113157diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig
113158index b02660f..c0f791c 100644
113159--- a/net/netfilter/Kconfig
113160+++ b/net/netfilter/Kconfig
113161@@ -1122,6 +1122,16 @@ config NETFILTER_XT_MATCH_ESP
113162
113163 To compile it as a module, choose M here. If unsure, say N.
113164
113165+config NETFILTER_XT_MATCH_GRADM
113166+ tristate '"gradm" match support'
113167+ depends on NETFILTER_XTABLES && NETFILTER_ADVANCED
113168+ depends on GRKERNSEC && !GRKERNSEC_NO_RBAC
113169+ ---help---
113170+ The gradm match allows to match on grsecurity RBAC being enabled.
113171+ It is useful when iptables rules are applied early on bootup to
113172+ prevent connections to the machine (except from a trusted host)
113173+ while the RBAC system is disabled.
113174+
113175 config NETFILTER_XT_MATCH_HASHLIMIT
113176 tristate '"hashlimit" match support'
113177 depends on (IP6_NF_IPTABLES || IP6_NF_IPTABLES=n)
113178diff --git a/net/netfilter/Makefile b/net/netfilter/Makefile
113179index 89f73a9..e4e5bd9 100644
113180--- a/net/netfilter/Makefile
113181+++ b/net/netfilter/Makefile
113182@@ -139,6 +139,7 @@ obj-$(CONFIG_NETFILTER_XT_MATCH_DEVGROUP) += xt_devgroup.o
113183 obj-$(CONFIG_NETFILTER_XT_MATCH_DSCP) += xt_dscp.o
113184 obj-$(CONFIG_NETFILTER_XT_MATCH_ECN) += xt_ecn.o
113185 obj-$(CONFIG_NETFILTER_XT_MATCH_ESP) += xt_esp.o
113186+obj-$(CONFIG_NETFILTER_XT_MATCH_GRADM) += xt_gradm.o
113187 obj-$(CONFIG_NETFILTER_XT_MATCH_HASHLIMIT) += xt_hashlimit.o
113188 obj-$(CONFIG_NETFILTER_XT_MATCH_HELPER) += xt_helper.o
113189 obj-$(CONFIG_NETFILTER_XT_MATCH_HL) += xt_hl.o
113190diff --git a/net/netfilter/ipset/ip_set_core.c b/net/netfilter/ipset/ip_set_core.c
113191index d259da3..6a32b2c 100644
113192--- a/net/netfilter/ipset/ip_set_core.c
113193+++ b/net/netfilter/ipset/ip_set_core.c
113194@@ -1952,7 +1952,7 @@ done:
113195 return ret;
113196 }
113197
113198-static struct nf_sockopt_ops so_set __read_mostly = {
113199+static struct nf_sockopt_ops so_set = {
113200 .pf = PF_INET,
113201 .get_optmin = SO_IP_SET,
113202 .get_optmax = SO_IP_SET + 1,
113203diff --git a/net/netfilter/ipvs/ip_vs_conn.c b/net/netfilter/ipvs/ip_vs_conn.c
113204index b0f7b62..0541842 100644
113205--- a/net/netfilter/ipvs/ip_vs_conn.c
113206+++ b/net/netfilter/ipvs/ip_vs_conn.c
113207@@ -572,7 +572,7 @@ ip_vs_bind_dest(struct ip_vs_conn *cp, struct ip_vs_dest *dest)
113208 /* Increase the refcnt counter of the dest */
113209 ip_vs_dest_hold(dest);
113210
113211- conn_flags = atomic_read(&dest->conn_flags);
113212+ conn_flags = atomic_read_unchecked(&dest->conn_flags);
113213 if (cp->protocol != IPPROTO_UDP)
113214 conn_flags &= ~IP_VS_CONN_F_ONE_PACKET;
113215 flags = cp->flags;
113216@@ -922,7 +922,7 @@ ip_vs_conn_new(const struct ip_vs_conn_param *p, int dest_af,
113217
113218 cp->control = NULL;
113219 atomic_set(&cp->n_control, 0);
113220- atomic_set(&cp->in_pkts, 0);
113221+ atomic_set_unchecked(&cp->in_pkts, 0);
113222
113223 cp->packet_xmit = NULL;
113224 cp->app = NULL;
113225@@ -1229,7 +1229,7 @@ static inline int todrop_entry(struct ip_vs_conn *cp)
113226
113227 /* Don't drop the entry if its number of incoming packets is not
113228 located in [0, 8] */
113229- i = atomic_read(&cp->in_pkts);
113230+ i = atomic_read_unchecked(&cp->in_pkts);
113231 if (i > 8 || i < 0) return 0;
113232
113233 if (!todrop_rate[i]) return 0;
113234diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
113235index b87ca32..76c7799 100644
113236--- a/net/netfilter/ipvs/ip_vs_core.c
113237+++ b/net/netfilter/ipvs/ip_vs_core.c
113238@@ -568,7 +568,7 @@ int ip_vs_leave(struct ip_vs_service *svc, struct sk_buff *skb,
113239 ret = cp->packet_xmit(skb, cp, pd->pp, iph);
113240 /* do not touch skb anymore */
113241
113242- atomic_inc(&cp->in_pkts);
113243+ atomic_inc_unchecked(&cp->in_pkts);
113244 ip_vs_conn_put(cp);
113245 return ret;
113246 }
113247@@ -1723,7 +1723,7 @@ ip_vs_in(unsigned int hooknum, struct sk_buff *skb, int af)
113248 if (cp->flags & IP_VS_CONN_F_ONE_PACKET)
113249 pkts = sysctl_sync_threshold(ipvs);
113250 else
113251- pkts = atomic_add_return(1, &cp->in_pkts);
113252+ pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
113253
113254 if (ipvs->sync_state & IP_VS_STATE_MASTER)
113255 ip_vs_sync_conn(net, cp, pkts);
113256diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
113257index ed99448..3ba6cad 100644
113258--- a/net/netfilter/ipvs/ip_vs_ctl.c
113259+++ b/net/netfilter/ipvs/ip_vs_ctl.c
113260@@ -799,7 +799,7 @@ __ip_vs_update_dest(struct ip_vs_service *svc, struct ip_vs_dest *dest,
113261 */
113262 ip_vs_rs_hash(ipvs, dest);
113263 }
113264- atomic_set(&dest->conn_flags, conn_flags);
113265+ atomic_set_unchecked(&dest->conn_flags, conn_flags);
113266
113267 /* bind the service */
113268 old_svc = rcu_dereference_protected(dest->svc, 1);
113269@@ -1664,7 +1664,7 @@ proc_do_sync_ports(struct ctl_table *table, int write,
113270 * align with netns init in ip_vs_control_net_init()
113271 */
113272
113273-static struct ctl_table vs_vars[] = {
113274+static ctl_table_no_const vs_vars[] __read_only = {
113275 {
113276 .procname = "amemthresh",
113277 .maxlen = sizeof(int),
113278@@ -1999,7 +1999,7 @@ static int ip_vs_info_seq_show(struct seq_file *seq, void *v)
113279 " %-7s %-6d %-10d %-10d\n",
113280 &dest->addr.in6,
113281 ntohs(dest->port),
113282- ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
113283+ ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
113284 atomic_read(&dest->weight),
113285 atomic_read(&dest->activeconns),
113286 atomic_read(&dest->inactconns));
113287@@ -2010,7 +2010,7 @@ static int ip_vs_info_seq_show(struct seq_file *seq, void *v)
113288 "%-7s %-6d %-10d %-10d\n",
113289 ntohl(dest->addr.ip),
113290 ntohs(dest->port),
113291- ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
113292+ ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
113293 atomic_read(&dest->weight),
113294 atomic_read(&dest->activeconns),
113295 atomic_read(&dest->inactconns));
113296@@ -2499,7 +2499,7 @@ __ip_vs_get_dest_entries(struct net *net, const struct ip_vs_get_dests *get,
113297
113298 entry.addr = dest->addr.ip;
113299 entry.port = dest->port;
113300- entry.conn_flags = atomic_read(&dest->conn_flags);
113301+ entry.conn_flags = atomic_read_unchecked(&dest->conn_flags);
113302 entry.weight = atomic_read(&dest->weight);
113303 entry.u_threshold = dest->u_threshold;
113304 entry.l_threshold = dest->l_threshold;
113305@@ -3040,7 +3040,7 @@ static int ip_vs_genl_fill_dest(struct sk_buff *skb, struct ip_vs_dest *dest)
113306 if (nla_put(skb, IPVS_DEST_ATTR_ADDR, sizeof(dest->addr), &dest->addr) ||
113307 nla_put_be16(skb, IPVS_DEST_ATTR_PORT, dest->port) ||
113308 nla_put_u32(skb, IPVS_DEST_ATTR_FWD_METHOD,
113309- (atomic_read(&dest->conn_flags) &
113310+ (atomic_read_unchecked(&dest->conn_flags) &
113311 IP_VS_CONN_F_FWD_MASK)) ||
113312 nla_put_u32(skb, IPVS_DEST_ATTR_WEIGHT,
113313 atomic_read(&dest->weight)) ||
113314@@ -3675,7 +3675,7 @@ static int __net_init ip_vs_control_net_init_sysctl(struct net *net)
113315 {
113316 int idx;
113317 struct netns_ipvs *ipvs = net_ipvs(net);
113318- struct ctl_table *tbl;
113319+ ctl_table_no_const *tbl;
113320
113321 atomic_set(&ipvs->dropentry, 0);
113322 spin_lock_init(&ipvs->dropentry_lock);
113323diff --git a/net/netfilter/ipvs/ip_vs_lblc.c b/net/netfilter/ipvs/ip_vs_lblc.c
113324index 127f140..553d652 100644
113325--- a/net/netfilter/ipvs/ip_vs_lblc.c
113326+++ b/net/netfilter/ipvs/ip_vs_lblc.c
113327@@ -118,7 +118,7 @@ struct ip_vs_lblc_table {
113328 * IPVS LBLC sysctl table
113329 */
113330 #ifdef CONFIG_SYSCTL
113331-static struct ctl_table vs_vars_table[] = {
113332+static ctl_table_no_const vs_vars_table[] __read_only = {
113333 {
113334 .procname = "lblc_expiration",
113335 .data = NULL,
113336diff --git a/net/netfilter/ipvs/ip_vs_lblcr.c b/net/netfilter/ipvs/ip_vs_lblcr.c
113337index 2229d2d..b32b785 100644
113338--- a/net/netfilter/ipvs/ip_vs_lblcr.c
113339+++ b/net/netfilter/ipvs/ip_vs_lblcr.c
113340@@ -289,7 +289,7 @@ struct ip_vs_lblcr_table {
113341 * IPVS LBLCR sysctl table
113342 */
113343
113344-static struct ctl_table vs_vars_table[] = {
113345+static ctl_table_no_const vs_vars_table[] __read_only = {
113346 {
113347 .procname = "lblcr_expiration",
113348 .data = NULL,
113349diff --git a/net/netfilter/ipvs/ip_vs_sync.c b/net/netfilter/ipvs/ip_vs_sync.c
113350index d93ceeb..4556144 100644
113351--- a/net/netfilter/ipvs/ip_vs_sync.c
113352+++ b/net/netfilter/ipvs/ip_vs_sync.c
113353@@ -609,7 +609,7 @@ static void ip_vs_sync_conn_v0(struct net *net, struct ip_vs_conn *cp,
113354 cp = cp->control;
113355 if (cp) {
113356 if (cp->flags & IP_VS_CONN_F_TEMPLATE)
113357- pkts = atomic_add_return(1, &cp->in_pkts);
113358+ pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
113359 else
113360 pkts = sysctl_sync_threshold(ipvs);
113361 ip_vs_sync_conn(net, cp->control, pkts);
113362@@ -771,7 +771,7 @@ control:
113363 if (!cp)
113364 return;
113365 if (cp->flags & IP_VS_CONN_F_TEMPLATE)
113366- pkts = atomic_add_return(1, &cp->in_pkts);
113367+ pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
113368 else
113369 pkts = sysctl_sync_threshold(ipvs);
113370 goto sloop;
113371@@ -902,7 +902,7 @@ static void ip_vs_proc_conn(struct net *net, struct ip_vs_conn_param *param,
113372
113373 if (opt)
113374 memcpy(&cp->in_seq, opt, sizeof(*opt));
113375- atomic_set(&cp->in_pkts, sysctl_sync_threshold(ipvs));
113376+ atomic_set_unchecked(&cp->in_pkts, sysctl_sync_threshold(ipvs));
113377 cp->state = state;
113378 cp->old_state = cp->state;
113379 /*
113380diff --git a/net/netfilter/ipvs/ip_vs_xmit.c b/net/netfilter/ipvs/ip_vs_xmit.c
113381index 3aedbda..6a63567 100644
113382--- a/net/netfilter/ipvs/ip_vs_xmit.c
113383+++ b/net/netfilter/ipvs/ip_vs_xmit.c
113384@@ -1214,7 +1214,7 @@ ip_vs_icmp_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
113385 else
113386 rc = NF_ACCEPT;
113387 /* do not touch skb anymore */
113388- atomic_inc(&cp->in_pkts);
113389+ atomic_inc_unchecked(&cp->in_pkts);
113390 goto out;
113391 }
113392
113393@@ -1307,7 +1307,7 @@ ip_vs_icmp_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
113394 else
113395 rc = NF_ACCEPT;
113396 /* do not touch skb anymore */
113397- atomic_inc(&cp->in_pkts);
113398+ atomic_inc_unchecked(&cp->in_pkts);
113399 goto out;
113400 }
113401
113402diff --git a/net/netfilter/nf_conntrack_acct.c b/net/netfilter/nf_conntrack_acct.c
113403index a4b5e2a..13b1de3 100644
113404--- a/net/netfilter/nf_conntrack_acct.c
113405+++ b/net/netfilter/nf_conntrack_acct.c
113406@@ -62,7 +62,7 @@ static struct nf_ct_ext_type acct_extend __read_mostly = {
113407 #ifdef CONFIG_SYSCTL
113408 static int nf_conntrack_acct_init_sysctl(struct net *net)
113409 {
113410- struct ctl_table *table;
113411+ ctl_table_no_const *table;
113412
113413 table = kmemdup(acct_sysctl_table, sizeof(acct_sysctl_table),
113414 GFP_KERNEL);
113415diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
113416index 13fad86..18c984c 100644
113417--- a/net/netfilter/nf_conntrack_core.c
113418+++ b/net/netfilter/nf_conntrack_core.c
113419@@ -1733,6 +1733,10 @@ void nf_conntrack_init_end(void)
113420 #define DYING_NULLS_VAL ((1<<30)+1)
113421 #define TEMPLATE_NULLS_VAL ((1<<30)+2)
113422
113423+#ifdef CONFIG_GRKERNSEC_HIDESYM
113424+static atomic_unchecked_t conntrack_cache_id = ATOMIC_INIT(0);
113425+#endif
113426+
113427 int nf_conntrack_init_net(struct net *net)
113428 {
113429 int ret = -ENOMEM;
113430@@ -1758,7 +1762,11 @@ int nf_conntrack_init_net(struct net *net)
113431 if (!net->ct.stat)
113432 goto err_pcpu_lists;
113433
113434+#ifdef CONFIG_GRKERNSEC_HIDESYM
113435+ net->ct.slabname = kasprintf(GFP_KERNEL, "nf_conntrack_%08x", atomic_inc_return_unchecked(&conntrack_cache_id));
113436+#else
113437 net->ct.slabname = kasprintf(GFP_KERNEL, "nf_conntrack_%p", net);
113438+#endif
113439 if (!net->ct.slabname)
113440 goto err_slabname;
113441
113442diff --git a/net/netfilter/nf_conntrack_ecache.c b/net/netfilter/nf_conntrack_ecache.c
113443index 4e78c57..ec8fb74 100644
113444--- a/net/netfilter/nf_conntrack_ecache.c
113445+++ b/net/netfilter/nf_conntrack_ecache.c
113446@@ -264,7 +264,7 @@ static struct nf_ct_ext_type event_extend __read_mostly = {
113447 #ifdef CONFIG_SYSCTL
113448 static int nf_conntrack_event_init_sysctl(struct net *net)
113449 {
113450- struct ctl_table *table;
113451+ ctl_table_no_const *table;
113452
113453 table = kmemdup(event_sysctl_table, sizeof(event_sysctl_table),
113454 GFP_KERNEL);
113455diff --git a/net/netfilter/nf_conntrack_helper.c b/net/netfilter/nf_conntrack_helper.c
113456index bd9d315..989947e 100644
113457--- a/net/netfilter/nf_conntrack_helper.c
113458+++ b/net/netfilter/nf_conntrack_helper.c
113459@@ -57,7 +57,7 @@ static struct ctl_table helper_sysctl_table[] = {
113460
113461 static int nf_conntrack_helper_init_sysctl(struct net *net)
113462 {
113463- struct ctl_table *table;
113464+ ctl_table_no_const *table;
113465
113466 table = kmemdup(helper_sysctl_table, sizeof(helper_sysctl_table),
113467 GFP_KERNEL);
113468diff --git a/net/netfilter/nf_conntrack_proto.c b/net/netfilter/nf_conntrack_proto.c
113469index b65d586..beec902 100644
113470--- a/net/netfilter/nf_conntrack_proto.c
113471+++ b/net/netfilter/nf_conntrack_proto.c
113472@@ -52,7 +52,7 @@ nf_ct_register_sysctl(struct net *net,
113473
113474 static void
113475 nf_ct_unregister_sysctl(struct ctl_table_header **header,
113476- struct ctl_table **table,
113477+ ctl_table_no_const **table,
113478 unsigned int users)
113479 {
113480 if (users > 0)
113481diff --git a/net/netfilter/nf_conntrack_standalone.c b/net/netfilter/nf_conntrack_standalone.c
113482index fc823fa..8311af3 100644
113483--- a/net/netfilter/nf_conntrack_standalone.c
113484+++ b/net/netfilter/nf_conntrack_standalone.c
113485@@ -468,7 +468,7 @@ static struct ctl_table nf_ct_netfilter_table[] = {
113486
113487 static int nf_conntrack_standalone_init_sysctl(struct net *net)
113488 {
113489- struct ctl_table *table;
113490+ ctl_table_no_const *table;
113491
113492 table = kmemdup(nf_ct_sysctl_table, sizeof(nf_ct_sysctl_table),
113493 GFP_KERNEL);
113494diff --git a/net/netfilter/nf_conntrack_timestamp.c b/net/netfilter/nf_conntrack_timestamp.c
113495index 7a394df..bd91a8a 100644
113496--- a/net/netfilter/nf_conntrack_timestamp.c
113497+++ b/net/netfilter/nf_conntrack_timestamp.c
113498@@ -42,7 +42,7 @@ static struct nf_ct_ext_type tstamp_extend __read_mostly = {
113499 #ifdef CONFIG_SYSCTL
113500 static int nf_conntrack_tstamp_init_sysctl(struct net *net)
113501 {
113502- struct ctl_table *table;
113503+ ctl_table_no_const *table;
113504
113505 table = kmemdup(tstamp_sysctl_table, sizeof(tstamp_sysctl_table),
113506 GFP_KERNEL);
113507diff --git a/net/netfilter/nf_log.c b/net/netfilter/nf_log.c
113508index 675d12c..b36e825 100644
113509--- a/net/netfilter/nf_log.c
113510+++ b/net/netfilter/nf_log.c
113511@@ -386,7 +386,7 @@ static const struct file_operations nflog_file_ops = {
113512
113513 #ifdef CONFIG_SYSCTL
113514 static char nf_log_sysctl_fnames[NFPROTO_NUMPROTO-NFPROTO_UNSPEC][3];
113515-static struct ctl_table nf_log_sysctl_table[NFPROTO_NUMPROTO+1];
113516+static ctl_table_no_const nf_log_sysctl_table[NFPROTO_NUMPROTO+1] __read_only;
113517
113518 static int nf_log_proc_dostring(struct ctl_table *table, int write,
113519 void __user *buffer, size_t *lenp, loff_t *ppos)
113520@@ -417,13 +417,15 @@ static int nf_log_proc_dostring(struct ctl_table *table, int write,
113521 rcu_assign_pointer(net->nf.nf_loggers[tindex], logger);
113522 mutex_unlock(&nf_log_mutex);
113523 } else {
113524+ ctl_table_no_const nf_log_table = *table;
113525+
113526 mutex_lock(&nf_log_mutex);
113527 logger = nft_log_dereference(net->nf.nf_loggers[tindex]);
113528 if (!logger)
113529- table->data = "NONE";
113530+ nf_log_table.data = "NONE";
113531 else
113532- table->data = logger->name;
113533- r = proc_dostring(table, write, buffer, lenp, ppos);
113534+ nf_log_table.data = logger->name;
113535+ r = proc_dostring(&nf_log_table, write, buffer, lenp, ppos);
113536 mutex_unlock(&nf_log_mutex);
113537 }
113538
113539diff --git a/net/netfilter/nf_sockopt.c b/net/netfilter/nf_sockopt.c
113540index c68c1e5..8b5d670 100644
113541--- a/net/netfilter/nf_sockopt.c
113542+++ b/net/netfilter/nf_sockopt.c
113543@@ -43,7 +43,7 @@ int nf_register_sockopt(struct nf_sockopt_ops *reg)
113544 }
113545 }
113546
113547- list_add(&reg->list, &nf_sockopts);
113548+ pax_list_add((struct list_head *)&reg->list, &nf_sockopts);
113549 out:
113550 mutex_unlock(&nf_sockopt_mutex);
113551 return ret;
113552@@ -53,7 +53,7 @@ EXPORT_SYMBOL(nf_register_sockopt);
113553 void nf_unregister_sockopt(struct nf_sockopt_ops *reg)
113554 {
113555 mutex_lock(&nf_sockopt_mutex);
113556- list_del(&reg->list);
113557+ pax_list_del((struct list_head *)&reg->list);
113558 mutex_unlock(&nf_sockopt_mutex);
113559 }
113560 EXPORT_SYMBOL(nf_unregister_sockopt);
113561diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c
113562index 11d85b3..7fcc420 100644
113563--- a/net/netfilter/nfnetlink_log.c
113564+++ b/net/netfilter/nfnetlink_log.c
113565@@ -83,7 +83,7 @@ static int nfnl_log_net_id __read_mostly;
113566 struct nfnl_log_net {
113567 spinlock_t instances_lock;
113568 struct hlist_head instance_table[INSTANCE_BUCKETS];
113569- atomic_t global_seq;
113570+ atomic_unchecked_t global_seq;
113571 };
113572
113573 static struct nfnl_log_net *nfnl_log_pernet(struct net *net)
113574@@ -563,7 +563,7 @@ __build_packet_message(struct nfnl_log_net *log,
113575 /* global sequence number */
113576 if ((inst->flags & NFULNL_CFG_F_SEQ_GLOBAL) &&
113577 nla_put_be32(inst->skb, NFULA_SEQ_GLOBAL,
113578- htonl(atomic_inc_return(&log->global_seq))))
113579+ htonl(atomic_inc_return_unchecked(&log->global_seq))))
113580 goto nla_put_failure;
113581
113582 if (data_len) {
113583diff --git a/net/netfilter/nft_compat.c b/net/netfilter/nft_compat.c
113584index 65f3e2b..2e9d6a0 100644
113585--- a/net/netfilter/nft_compat.c
113586+++ b/net/netfilter/nft_compat.c
113587@@ -317,14 +317,7 @@ static void nft_match_eval(const struct nft_expr *expr,
113588 return;
113589 }
113590
113591- switch(ret) {
113592- case true:
113593- data[NFT_REG_VERDICT].verdict = NFT_CONTINUE;
113594- break;
113595- case false:
113596- data[NFT_REG_VERDICT].verdict = NFT_BREAK;
113597- break;
113598- }
113599+ data[NFT_REG_VERDICT].verdict = ret ? NFT_CONTINUE : NFT_BREAK;
113600 }
113601
113602 static const struct nla_policy nft_match_policy[NFTA_MATCH_MAX + 1] = {
113603diff --git a/net/netfilter/xt_gradm.c b/net/netfilter/xt_gradm.c
113604new file mode 100644
113605index 0000000..c566332
113606--- /dev/null
113607+++ b/net/netfilter/xt_gradm.c
113608@@ -0,0 +1,51 @@
113609+/*
113610+ * gradm match for netfilter
113611